From 5d8f03e241367ee6b8e15269041c28c86ca60042 Mon Sep 17 00:00:00 2001 From: Viktor Torstensson Date: Thu, 4 Sep 2025 01:08:06 +0200 Subject: [PATCH 1/5] sqlc+firewalldb: add `GetAction` SQL query Add a new SQL query `GetAction` to retrieve a single action by its ID. This query will be needed for the kvdb to SQL migration of actions store. --- db/sqlc/actions.sql.go | 28 ++++++++++++++++++++++++++++ db/sqlc/querier.go | 1 + db/sqlc/queries/actions.sql | 6 ++++++ firewalldb/actions_sql.go | 1 + 4 files changed, 36 insertions(+) diff --git a/db/sqlc/actions.sql.go b/db/sqlc/actions.sql.go index 4a3e8c891..294bcdb1f 100644 --- a/db/sqlc/actions.sql.go +++ b/db/sqlc/actions.sql.go @@ -11,6 +11,34 @@ import ( "time" ) +const getAction = `-- name: GetAction :one +SELECT id, session_id, account_id, macaroon_identifier, actor_name, feature_name, action_trigger, intent, structured_json_data, rpc_method, rpc_params_json, created_at, action_state, error_reason +FROM actions +WHERE id = $1 +` + +func (q *Queries) GetAction(ctx context.Context, id int64) (Action, error) { + row := q.db.QueryRowContext(ctx, getAction, id) + var i Action + err := row.Scan( + &i.ID, + &i.SessionID, + &i.AccountID, + &i.MacaroonIdentifier, + &i.ActorName, + &i.FeatureName, + &i.ActionTrigger, + &i.Intent, + &i.StructuredJsonData, + &i.RpcMethod, + &i.RpcParamsJson, + &i.CreatedAt, + &i.ActionState, + &i.ErrorReason, + ) + return i, err +} + const insertAction = `-- name: InsertAction :one INSERT INTO actions ( session_id, account_id, macaroon_identifier, actor_name, feature_name, action_trigger, diff --git a/db/sqlc/querier.go b/db/sqlc/querier.go index d76d5e6e3..4ff08707d 100644 --- a/db/sqlc/querier.go +++ b/db/sqlc/querier.go @@ -24,6 +24,7 @@ type Querier interface { GetAccountIndex(ctx context.Context, name string) (int64, error) GetAccountInvoice(ctx context.Context, arg GetAccountInvoiceParams) (AccountInvoice, error) GetAccountPayment(ctx context.Context, arg GetAccountPaymentParams) (AccountPayment, error) + GetAction(ctx context.Context, id int64) (Action, error) GetAliasBySessionID(ctx context.Context, id int64) ([]byte, error) GetAllPrivacyPairs(ctx context.Context, groupID int64) ([]GetAllPrivacyPairsRow, error) GetFeatureID(ctx context.Context, name string) (int64, error) diff --git a/db/sqlc/queries/actions.sql b/db/sqlc/queries/actions.sql index 2a966022d..d15d673cd 100644 --- a/db/sqlc/queries/actions.sql +++ b/db/sqlc/queries/actions.sql @@ -13,3 +13,9 @@ UPDATE actions SET action_state = $1, error_reason = $2 WHERE id = $3; + + +-- name: GetAction :one +SELECT * +FROM actions +WHERE id = $1; \ No newline at end of file diff --git a/firewalldb/actions_sql.go b/firewalldb/actions_sql.go index 9e2fa63bd..cfa67c18a 100644 --- a/firewalldb/actions_sql.go +++ b/firewalldb/actions_sql.go @@ -35,6 +35,7 @@ type SQLActionQueries interface { SetActionState(ctx context.Context, arg sqlc.SetActionStateParams) error ListActions(ctx context.Context, arg sqlc.ListActionsParams) ([]sqlc.Action, error) CountActions(ctx context.Context, arg sqlc.ActionQueryParams) (int64, error) + GetAction(ctx context.Context, id int64) (sqlc.Action, error) } // sqlActionLocator helps us find an action in the SQL DB. 
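For context between patches, here is a minimal sketch of how the new `GetAction` querier method could be consumed through the `SQLActionQueries` interface extended above. The `fetchMigratedAction` helper is hypothetical (not part of this patch set) and only illustrates the lookup-by-ID pattern that the actions migration in a later patch of this series builds on; transaction scoping is assumed to follow the surrounding store code.

package firewalldb

import (
	"context"
	"fmt"

	"github.com/lightninglabs/lightning-terminal/db/sqlc"
)

// fetchMigratedAction loads a single action row by its SQL ID, for example to
// verify that a freshly migrated action was persisted as expected. This is a
// hypothetical helper used only for illustration.
func fetchMigratedAction(ctx context.Context, db SQLActionQueries,
	id int64) (sqlc.Action, error) {

	action, err := db.GetAction(ctx, id)
	if err != nil {
		return sqlc.Action{}, fmt.Errorf("unable to fetch action "+
			"%d: %w", id, err)
	}

	return action, nil
}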
From 4ccb56a7712afb943d08bff09d8e85d5b57504f4 Mon Sep 17 00:00:00 2001 From: Viktor Torstensson Date: Tue, 9 Sep 2025 15:04:07 +0200 Subject: [PATCH 2/5] multi: add `UpdateAccountAliasForTests` query In the upcoming kvdb to SQL migration of the actions store, we need to simulate in tests that two or more accounts have account aliases that collide in their first 4 bytes. In order to allow creation of such accounts, we need to be able to update the alias of an account in tests, and this commit adds a SQL query enabling this functionality. Note that the `UpdateAccountAliasForTests` query is only intended for use in tests and should not be used in production code. --- accounts/store_sql.go | 3 +++ db/sqlc/accounts.sql.go | 20 ++++++++++++++++++++ db/sqlc/querier.go | 2 ++ db/sqlc/queries/accounts.sql | 7 +++++++ 4 files changed, 32 insertions(+) diff --git a/accounts/store_sql.go b/accounts/store_sql.go index 830f16587..2fc6d2293 100644 --- a/accounts/store_sql.go +++ b/accounts/store_sql.go @@ -49,6 +49,9 @@ type SQLQueries interface { UpdateAccountBalance(ctx context.Context, arg sqlc.UpdateAccountBalanceParams) (int64, error) UpdateAccountExpiry(ctx context.Context, arg sqlc.UpdateAccountExpiryParams) (int64, error) UpdateAccountLastUpdate(ctx context.Context, arg sqlc.UpdateAccountLastUpdateParams) (int64, error) + // UpdateAccountAliasForTests is a query intended only for testing + // purposes, to change the account alias. + UpdateAccountAliasForTests(ctx context.Context, arg sqlc.UpdateAccountAliasForTestsParams) (int64, error) UpsertAccountPayment(ctx context.Context, arg sqlc.UpsertAccountPaymentParams) error GetAccountInvoice(ctx context.Context, arg sqlc.GetAccountInvoiceParams) (sqlc.AccountInvoice, error) } diff --git a/db/sqlc/accounts.sql.go b/db/sqlc/accounts.sql.go index f6b3fc815..33334dbcd 100644 --- a/db/sqlc/accounts.sql.go +++ b/db/sqlc/accounts.sql.go @@ -313,6 +313,26 @@ func (q *Queries) SetAccountIndex(ctx context.Context, arg SetAccountIndexParams return err } +const updateAccountAliasForTests = `-- name: UpdateAccountAliasForTests :one +UPDATE accounts +SET alias = $1 +WHERE id = $2 + RETURNING id +` + +type UpdateAccountAliasForTestsParams struct { + Alias int64 + ID int64 +} + +// NOTE: This query is only intended for testing purposes. +func (q *Queries) UpdateAccountAliasForTests(ctx context.Context, arg UpdateAccountAliasForTestsParams) (int64, error) { + row := q.db.QueryRowContext(ctx, updateAccountAliasForTests, arg.Alias, arg.ID) + var id int64 + err := row.Scan(&id) + return id, err +} + const updateAccountBalance = `-- name: UpdateAccountBalance :one UPDATE accounts SET current_balance_msat = $1 diff --git a/db/sqlc/querier.go b/db/sqlc/querier.go index 4ff08707d..3e2615030 100644 --- a/db/sqlc/querier.go +++ b/db/sqlc/querier.go @@ -67,6 +67,8 @@ type Querier interface { SetSessionGroupID(ctx context.Context, arg SetSessionGroupIDParams) error SetSessionRemotePublicKey(ctx context.Context, arg SetSessionRemotePublicKeyParams) error SetSessionRevokedAt(ctx context.Context, arg SetSessionRevokedAtParams) error + // NOTE: This query is only intended for testing purposes.
+ UpdateAccountAliasForTests(ctx context.Context, arg UpdateAccountAliasForTestsParams) (int64, error) UpdateAccountBalance(ctx context.Context, arg UpdateAccountBalanceParams) (int64, error) UpdateAccountExpiry(ctx context.Context, arg UpdateAccountExpiryParams) (int64, error) UpdateAccountLastUpdate(ctx context.Context, arg UpdateAccountLastUpdateParams) (int64, error) diff --git a/db/sqlc/queries/accounts.sql b/db/sqlc/queries/accounts.sql index 9c23c8c4f..3be5ccd55 100644 --- a/db/sqlc/queries/accounts.sql +++ b/db/sqlc/queries/accounts.sql @@ -25,6 +25,13 @@ RETURNING id; INSERT INTO account_invoices (account_id, hash) VALUES ($1, $2); +-- name: UpdateAccountAliasForTests :one +-- NOTE: This query is only intended for testing purposes. +UPDATE accounts +SET alias = $1 +WHERE id = $2 + RETURNING id; + -- name: DeleteAccountPayment :exec DELETE FROM account_payments WHERE hash = $1 From bc9a3b1d35edbc96edfb1074d1a4c0a771fb8f25 Mon Sep 17 00:00:00 2001 From: Viktor Torstensson Date: Thu, 4 Sep 2025 01:16:51 +0200 Subject: [PATCH 3/5] firewalldb: add `expectedAction`s to mig tests res In preparation for the kvdb to SQL migration of the actions store, this commit adds an `actions` field to the expected result of the migration tests. Once the migration is implemented, this field will be used to validate that the migrated actions match the expected results. --- firewalldb/sql_migration_test.go | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/firewalldb/sql_migration_test.go b/firewalldb/sql_migration_test.go index 4d71f5cc0..db384bca0 100644 --- a/firewalldb/sql_migration_test.go +++ b/firewalldb/sql_migration_test.go @@ -39,6 +39,7 @@ var ( type expectedResult struct { kvEntries []*kvEntry privPairs privacyPairs + actions []*Action } // TestFirewallDBMigration tests the migration of firewalldb from a bolt @@ -306,6 +307,7 @@ func TestFirewallDBMigration(t *testing.T) { return &expectedResult{ kvEntries: []*kvEntry{}, privPairs: make(privacyPairs), + actions: []*Action{}, } }, }, @@ -545,6 +547,7 @@ func allEntryCombinations(t *testing.T, ctx context.Context, boltDB *BoltDB, return &expectedResult{ kvEntries: result, privPairs: make(privacyPairs), + actions: []*Action{}, } } @@ -594,6 +597,7 @@ func insertTempAndPermEntry(t *testing.T, ctx context.Context, kvEntries: []*kvEntry{tempKvEntry, permKvEntry}, // No privacy pairs are inserted in this test. privPairs: make(privacyPairs), + actions: []*Action{}, } } @@ -758,6 +762,7 @@ func randomKVEntries(t *testing.T, ctx context.Context, kvEntries: insertedEntries, // No privacy pairs are inserted in this test. 
privPairs: make(privacyPairs), + actions: []*Action{}, } } @@ -836,6 +841,7 @@ func createPrivacyPairs(t *testing.T, ctx context.Context, return &expectedResult{ kvEntries: []*kvEntry{}, privPairs: pairs, + actions: []*Action{}, } } @@ -889,6 +895,7 @@ func randomPrivacyPairs(t *testing.T, ctx context.Context, return &expectedResult{ kvEntries: []*kvEntry{}, privPairs: pairs, + actions: []*Action{}, } } @@ -906,6 +913,7 @@ func randomFirewallDBEntries(t *testing.T, ctx context.Context, return &expectedResult{ kvEntries: kvEntries.kvEntries, privPairs: privPairs.privPairs, + actions: []*Action{}, } } From 273159ff13398ff0f4f6db40cabe6b4eea97c6cc Mon Sep 17 00:00:00 2001 From: Viktor Torstensson Date: Mon, 8 Sep 2025 18:10:25 +0200 Subject: [PATCH 4/5] firewalldb: pass accountStore & rootKeyStore to mig tests This commit adds an `accountStore` and a `rootKeyStore` arg to the database population functions of the kvdb to SQL migration tests of the firewalldb. As an action can be linked to an account, we need to enable simulation of that in the migration tests of the actions store. In order to create the accounts to link the actions to, we need to create the accounts in the account store, which therefore requires passing the `accountStore` to the database population functions of the migration tests. The kvdb to SQL migration will also update the migrated actions to not only store the 4 byte short ID of the action's corresponding macaroon, but its full 8 byte root key ID. This requires that the migration function has access to all of lnd's 8 byte root key IDs, and the migration function will therefore be changed to accept a [][]byte arg containing all of lnd's root key IDs. As we can't access a full lnd instance in the migration unit tests, we need to create a mock instance that simulates the root key store, and this commit therefore adds a mock `rootKeyStore` struct which is also passed to the database population functions of the migration tests. This `rootKeyStore` struct can be used to generate dummy root key IDs when creating simulated actions in the migration tests. --- firewalldb/sql_migration_test.go | 114 ++++++++++++++++++++++++++----- 1 file changed, 96 insertions(+), 18 deletions(-) diff --git a/firewalldb/sql_migration_test.go b/firewalldb/sql_migration_test.go index db384bca0..b375c4bc5 100644 --- a/firewalldb/sql_migration_test.go +++ b/firewalldb/sql_migration_test.go @@ -4,6 +4,7 @@ import ( "bytes" "context" "database/sql" + "encoding/binary" "errors" "fmt" "testing" @@ -35,6 +36,50 @@ var ( testEntryValue = []byte{1, 2, 3} ) +// rootKeyMockStore is a mock implementation of a macaroon service store that +// can be used to generate mock root keys for testing. +type rootKeyMockStore struct { + // rootKeys is a slice of all root keys that have been added to the + // store. + rootKeys [][]byte +} + +// addRootKeyFromIDSuffix adds a new root key to the store, using the passed +// 4 byte suffix. The function generates a root key that ends with the 4 byte +// suffix, prefixed by 4 random bytes. +func (r *rootKeyMockStore) addRootKeyFromIDSuffix(suffix [4]byte) uint64 { + // As a real root key is 8 bytes, we need to generate a random 4 byte + // prefix to prepend to the passed 4 byte suffix. + rootKey := append(randomBytes(4), suffix[:]...)
+ r.rootKeys = append(r.rootKeys, rootKey) + + return binary.BigEndian.Uint64(rootKey[:]) +} + +// addRootKeyFromAcctID adds a new root key to the store, using the first 4 +// bytes of the passed account ID as the suffix for the root key, prefixed by 4 +// random bytes. +func (r *rootKeyMockStore) addRootKeyFromAcctID(id accounts.AccountID) uint64 { + var acctPrefix [4]byte + copy(acctPrefix[:], id[:4]) + + return r.addRootKeyFromIDSuffix(acctPrefix) +} + +// addRandomRootKey adds a new random root key to the store, and returns the +// root key ID as an uint64. +func (r *rootKeyMockStore) addRandomRootKey() uint64 { + rootKey := randomBytes(8) + r.rootKeys = append(r.rootKeys, rootKey) + + return binary.BigEndian.Uint64(rootKey[:]) +} + +// getAllRootKeys returns all root keys that have been added to the store. +func (r *rootKeyMockStore) getAllRootKeys() [][]byte { + return r.rootKeys +} + // expectedResult represents the expected result of a migration test. type expectedResult struct { kvEntries []*kvEntry @@ -294,13 +339,16 @@ func TestFirewallDBMigration(t *testing.T) { tests := []struct { name string populateDB func(t *testing.T, ctx context.Context, - boltDB *BoltDB, sessionStore session.Store) *expectedResult + boltDB *BoltDB, sessionStore session.Store, + accountsStore accounts.Store, + rKeyStore *rootKeyMockStore) *expectedResult }{ { name: "empty", populateDB: func(t *testing.T, ctx context.Context, - boltDB *BoltDB, - sessionStore session.Store) *expectedResult { + boltDB *BoltDB, sessionStore session.Store, + accountsStore accounts.Store, + rKeyStore *rootKeyMockStore) *expectedResult { // Don't populate the DB, and return empty kv // records and privacy pairs. @@ -384,9 +432,12 @@ func TestFirewallDBMigration(t *testing.T) { require.NoError(t, firewallStore.Close()) }) + rootKeyStore := &rootKeyMockStore{} + // Populate the kv store. entries := test.populateDB( t, ctx, firewallStore, sessionsStore, + accountStore, rootKeyStore, ) // Create the SQL store that we will migrate the data @@ -412,7 +463,8 @@ func TestFirewallDBMigration(t *testing.T) { // globalEntries populates the kv store with one global entry for the temp // store, and one for the perm store. func globalEntries(t *testing.T, ctx context.Context, boltDB *BoltDB, - _ session.Store) *expectedResult { + _ session.Store, _ accounts.Store, + _ *rootKeyMockStore) *expectedResult { return insertTempAndPermEntry( t, ctx, boltDB, testRuleName, fn.None[[]byte](), @@ -424,7 +476,8 @@ func globalEntries(t *testing.T, ctx context.Context, boltDB *BoltDB, // entry for the local temp store, and one session specific entry for the perm // local store. func sessionSpecificEntries(t *testing.T, ctx context.Context, boltDB *BoltDB, - sessionStore session.Store) *expectedResult { + sessionStore session.Store, _ accounts.Store, + _ *rootKeyMockStore) *expectedResult { groupAlias := getNewSessionAlias(t, ctx, sessionStore) @@ -438,7 +491,8 @@ func sessionSpecificEntries(t *testing.T, ctx context.Context, boltDB *BoltDB, // entry for the local temp store, and one feature specific entry for the perm // local store. 
func featureSpecificEntries(t *testing.T, ctx context.Context, boltDB *BoltDB, - sessionStore session.Store) *expectedResult { + sessionStore session.Store, _ accounts.Store, + _ *rootKeyMockStore) *expectedResult { groupAlias := getNewSessionAlias(t, ctx, sessionStore) @@ -456,7 +510,8 @@ func featureSpecificEntries(t *testing.T, ctx context.Context, boltDB *BoltDB, // any entries when the entry set is more complex than just a single entry at // each level. func allEntryCombinations(t *testing.T, ctx context.Context, boltDB *BoltDB, - sessionStore session.Store) *expectedResult { + sessionStore session.Store, acctStore accounts.Store, + rStore *rootKeyMockStore) *expectedResult { var result []*kvEntry add := func(entry *expectedResult) { @@ -465,9 +520,13 @@ func allEntryCombinations(t *testing.T, ctx context.Context, boltDB *BoltDB, // First lets create standard entries at all levels, which represents // the entries added by other tests. - add(globalEntries(t, ctx, boltDB, sessionStore)) - add(sessionSpecificEntries(t, ctx, boltDB, sessionStore)) - add(featureSpecificEntries(t, ctx, boltDB, sessionStore)) + add(globalEntries(t, ctx, boltDB, sessionStore, acctStore, rStore)) + add(sessionSpecificEntries( + t, ctx, boltDB, sessionStore, acctStore, rStore, + )) + add(featureSpecificEntries( + t, ctx, boltDB, sessionStore, acctStore, rStore, + )) groupAlias := getNewSessionAlias(t, ctx, sessionStore) @@ -647,7 +706,8 @@ func insertKvEntry(t *testing.T, ctx context.Context, // across all possible combinations of different levels of entries in the kv // store. All values and different bucket names are randomly generated. func randomKVEntries(t *testing.T, ctx context.Context, - boltDB *BoltDB, sessionStore session.Store) *expectedResult { + boltDB *BoltDB, sessionStore session.Store, _ accounts.Store, + _ *rootKeyMockStore) *expectedResult { var ( // We set the number of entries to insert to 1000, as that @@ -769,7 +829,8 @@ func randomKVEntries(t *testing.T, ctx context.Context, // oneSessionAndPrivPair inserts 1 session with 1 privacy pair into the // boltDB. func oneSessionAndPrivPair(t *testing.T, ctx context.Context, - boltDB *BoltDB, sessionStore session.Store) *expectedResult { + boltDB *BoltDB, sessionStore session.Store, _ accounts.Store, + _ *rootKeyMockStore) *expectedResult { return createPrivacyPairs(t, ctx, boltDB, sessionStore, 1, 1) } @@ -777,7 +838,8 @@ func oneSessionAndPrivPair(t *testing.T, ctx context.Context, // oneSessionsMultiplePrivPairs inserts 1 session with 10 privacy pairs into the // boltDB. func oneSessionsMultiplePrivPairs(t *testing.T, ctx context.Context, - boltDB *BoltDB, sessionStore session.Store) *expectedResult { + boltDB *BoltDB, sessionStore session.Store, _ accounts.Store, + _ *rootKeyMockStore) *expectedResult { return createPrivacyPairs(t, ctx, boltDB, sessionStore, 1, 10) } @@ -785,7 +847,8 @@ func oneSessionsMultiplePrivPairs(t *testing.T, ctx context.Context, // multipleSessionsAndPrivacyPairs inserts 5 sessions with 10 privacy pairs // per session into the boltDB. func multipleSessionsAndPrivacyPairs(t *testing.T, ctx context.Context, - boltDB *BoltDB, sessionStore session.Store) *expectedResult { + boltDB *BoltDB, sessionStore session.Store, _ accounts.Store, + _ *rootKeyMockStore) *expectedResult { return createPrivacyPairs(t, ctx, boltDB, sessionStore, 5, 10) } @@ -847,7 +910,8 @@ func createPrivacyPairs(t *testing.T, ctx context.Context, // randomPrivacyPairs creates a random number of privacy pairs to 10 sessions. 
func randomPrivacyPairs(t *testing.T, ctx context.Context, - boltDB *BoltDB, sessionStore session.Store) *expectedResult { + boltDB *BoltDB, sessionStore session.Store, _ accounts.Store, + _ *rootKeyMockStore) *expectedResult { numSessions := 10 maxPairsPerSession := 20 @@ -905,10 +969,15 @@ func randomPrivacyPairs(t *testing.T, ctx context.Context, // TODO(viktor): Extend this function to also populate it with random action // entries, once the actions migration has been implemented. func randomFirewallDBEntries(t *testing.T, ctx context.Context, - boltDB *BoltDB, sessionStore session.Store) *expectedResult { + boltDB *BoltDB, sessionStore session.Store, acctStore accounts.Store, + rStore *rootKeyMockStore) *expectedResult { - kvEntries := randomKVEntries(t, ctx, boltDB, sessionStore) - privPairs := randomPrivacyPairs(t, ctx, boltDB, sessionStore) + kvEntries := randomKVEntries( + t, ctx, boltDB, sessionStore, acctStore, rStore, + ) + privPairs := randomPrivacyPairs( + t, ctx, boltDB, sessionStore, acctStore, rStore, + ) return &expectedResult{ kvEntries: kvEntries.kvEntries, @@ -927,3 +996,12 @@ func randomString(n int) string { } return string(b) } + +// randomBytes generates a random byte array of the passed length n. +func randomBytes(n int) []byte { + b := make([]byte, n) + for i := range b { + b[i] = byte(rand.Intn(256)) // Random int between 0-255, then cast to byte + } + return b +} From ec8b57ee0f4cc0fcc3b16d80395b73b35fc5a832 Mon Sep 17 00:00:00 2001 From: Viktor Torstensson Date: Mon, 28 Jul 2025 10:31:43 +0200 Subject: [PATCH 5/5] firewalldb: add actions SQL migration This commit introduces the migration logic for transitioning the actions store from kvdb to SQL. Note that as of this commit, the migration is not yet triggered by any production code, i.e. only tests execute the migration logic. --- firewalldb/sql_migration.go | 797 ++++++++++++++++++++++- firewalldb/sql_migration_test.go | 1030 +++++++++++++++++++++++++++++- 2 files changed, 1812 insertions(+), 15 deletions(-) diff --git a/firewalldb/sql_migration.go b/firewalldb/sql_migration.go index 9d870d842..b6db608a9 100644 --- a/firewalldb/sql_migration.go +++ b/firewalldb/sql_migration.go @@ -4,12 +4,20 @@ import ( "bytes" "context" "database/sql" + "encoding/binary" "errors" "fmt" + "reflect" + "sort" + "time" + "github.com/davecgh/go-spew/spew" + "github.com/lightninglabs/lightning-terminal/accounts" "github.com/lightninglabs/lightning-terminal/db/sqlc" + "github.com/lightninglabs/lightning-terminal/session" "github.com/lightningnetwork/lnd/fn" "github.com/lightningnetwork/lnd/sqldb" + "github.com/pmezard/go-difflib/difflib" "go.etcd.io/bbolt" ) @@ -80,7 +88,8 @@ type privacyPairs = map[int64]map[string]string // NOTE: As sessions may contain linked sessions and accounts, the sessions and // accounts sql migration MUST be run prior to this migration. func MigrateFirewallDBToSQL(ctx context.Context, kvStore *bbolt.DB, - sqlTx SQLQueries) error { + sqlTx SQLQueries, sessionDB session.SQLQueries, + accountDB accounts.SQLQueries, macRootKeyIDs [][]byte) error { log.Infof("Starting migration of the rules DB to SQL") @@ -94,9 +103,14 @@ func MigrateFirewallDBToSQL(ctx context.Context, kvStore *bbolt.DB, return err } - log.Infof("The rules DB has been migrated from KV to SQL.") + err = migrateActionsToSQL( + ctx, kvStore, sqlTx, sessionDB, accountDB, macRootKeyIDs, + ) + if err != nil { + return err + } - // TODO(viktor): Add migration for the action stores. 
+ log.Infof("The rules DB has been migrated from KV to SQL.") return nil } @@ -774,3 +788,780 @@ func validateGroupPairsMigration(ctx context.Context, sqlTx SQLQueries, return nil } + +// migrateActionsToSQL runs the migration of the actions store from the KV +// database to the SQL database. The function also asserts that the migrated +// values match the original values in the actions store. +func migrateActionsToSQL(ctx context.Context, kvStore *bbolt.DB, + sqlTx SQLQueries, sessionDB session.SQLQueries, + accountsDB accounts.SQLQueries, macRootKeyIDs [][]byte) error { + + log.Infof("Starting migration of the actions store to SQL") + + // Start by fetching all accounts and sessions, and map them by their + // IDs. This will allow us to quickly look up any account(s) and/or + // session that match a specific action's macaroon identifier. + accts, err := accountsDB.ListAllAccounts(ctx) + if err != nil { + return fmt.Errorf("listing accounts failed: %w", err) + } + + acctsMap, err := mapAccounts(accts) + if err != nil { + return fmt.Errorf("mapping accounts failed: %w", err) + } + + sessions, err := sessionDB.ListSessions(ctx) + if err != nil { + return fmt.Errorf("listing sessions failed: %w", err) + } + + sessionMap, err := mapSessions(sessions) + if err != nil { + return fmt.Errorf("mapping sessions failed: %w", err) + } + + // Next, as the kvdb actions only have their last 4 bytes set for the + // MacaroonRootKeyID field, we'll do a best effort attempt fetch the + // full root key ID (all 8 bytes) from lnd when migrating each action. + // We do so by mapping the macaroon root key IDs by their 4 byte suffix, + // to make it easy to look up the full root key ID for each action when + // we migrate them, as they only have the last 4 bytes set. + macMap, err := mapMacIds(macRootKeyIDs) + if err != nil { + return fmt.Errorf("mapping macaroon root key IDs failed: %w", + err) + } + + // Iterate over and migrate all actions in the KVDB. Note that this + // function migrates each action while iterating over them, instead + // of first collecting all actions and storing them in memory before + // migrating them (which is common for other migrations). This is + // because in comparison to other stores, the actions store may contain + // a large number of entries. + err = kvStore.View(func(tx *bbolt.Tx) error { + actionsBucket := tx.Bucket(actionsBucketKey) + if actionsBucket == nil { + return fmt.Errorf("actions bucket not found") + } + + sessionsBucket := actionsBucket.Bucket(actionsKey) + if sessionsBucket == nil { + return fmt.Errorf("actions->sessions bucket not found") + } + + // Iterate over session ID buckets (i.e. what we should name + // macaroon IDs). + return sessionsBucket.ForEach(func(macID []byte, v []byte) error { + if v != nil { + return fmt.Errorf("expected only sub-buckets " + + "in sessions bucket") + } + + sessBucket := sessionsBucket.Bucket(macID) + if sessBucket == nil { + return fmt.Errorf("session bucket for %x not "+ + "found", macID) + } + + // fetch the full macaroon root key ID based on the + // macaroon identifier for the action (the last 4 bytes + // of the root key ID). + var macIDArr [4]byte + copy(macIDArr[:], macID) + + macRootKeyID, ok := macMap[macIDArr] + if !ok { + // If we don't have a mapping for this macaroon + // ID, this could mean that the user has deleted + // the lnd macaroon db, but not the litd + // firewalldb. 
+ // As there is no way to recover the full + // macaroonRootKeyID at this point, we set the + // first 4 bytes to zeroes, similar to how the + // action is already persisted for kvdb + // backends. + log.Warnf("No macaroon root key ID found for "+ + "macaroon ID %x, using zeroes for "+ + "the first 4 bytes", macID) + + macRootKeyID = make([]byte, 8) + copy(macRootKeyID[4:], macIDArr[:]) + } + + // Iterate over the actions inside each session/macaroon + // ID. + return sessBucket.ForEach(func(actionID, + actionBytes []byte) error { + + if actionBytes == nil { + return fmt.Errorf("unexpected nested "+ + "bucket under session %x", + macID) + } + + sessionID, err := session.IDFromBytes(macID) + if err != nil { + // This should be unreachable, as the + // macID should always be 4 bytes long. + return fmt.Errorf("invalid session ID "+ + "format %x: %v", macID, err) + } + + action, err := DeserializeAction( + bytes.NewReader(actionBytes), sessionID, + ) + if err != nil { + return fmt.Errorf("unable to "+ + "deserialize action in "+ + "session %x: %w", macID, err) + } + + log.Infof("Migrated Action: Macaroon ID: %x, "+ + "ActionID: %x, Actor: %s, Feature: %s", + macID, actionID, action.ActorName, + action.FeatureName) + + // Now proceed to migrate the action, and also + // validate that the action was correctly + // migrated. + err = migrateActionToSQL( + ctx, sqlTx, sessionDB, accountsDB, + acctsMap, sessionMap, action, + macRootKeyID, + ) + if err != nil { + return fmt.Errorf("migrating action "+ + "to SQL failed: %w", err) + } + + return nil + }) + }) + }) + if err != nil { + return fmt.Errorf("iterating over actions failed: %w", err) + } + + log.Infof("Finished iterating actions in KV store (no persistence yet).") + + return nil +} + +// migrateActionToSQL migrates a single action to the SQL database, and +// validates that the action was correctly migrated. +func migrateActionToSQL(ctx context.Context, sqlTx SQLQueries, + sessionDB session.SQLQueries, accountsDB accounts.SQLQueries, + acctsMap map[[4]byte][]sqlc.Account, sessMap map[[4]byte]sqlc.Session, + action *Action, macRootKeyID []byte) error { + + var ( + macIDSuffix [4]byte + err error + insertParams sqlc.InsertActionParams + ) + + // Extract the last 4 bytes of the macaroon root key ID suffix, to find + // any potential linked account(s) and/or session for the action. + // Note that the macRootKeyID is guaranteed to be 8 bytes long. + copy(macIDSuffix[:], macRootKeyID[len(macRootKeyID)-4:]) + + actAccounts, hasAccounts := acctsMap[macIDSuffix] + actSession, hasSessions := sessMap[macIDSuffix] + + // Based on if we found any potential linked account(s) and/or + // session, link the action to them in the SQL DB. + // The logic is as follows: + // 1) If we only find a potential linked session, the action + // is linked to the session. + // 2) If we only find potential linked account(s), the action + // is linked the account with the earliest expiry (where accounts + // that do not expire is seen as the earliest). + // 3) If we find both potential linked account(s) and session, + // the session is prioritized, and the action is linked + // to the session. + // 4) If we don't find any potential linked account(s) or session, + // the action is not linked to any account or session. + switch { + case hasAccounts && hasSessions: + // Alternative (3) above. + insertParams, err = paramsFromBothSessionAndAccounts( + ctx, accountsDB, action, actAccounts, actSession, + macRootKeyID, + ) + case hasSessions: + // Alternative (1) above. 
+ insertParams, err = paramsFromSession( + action, actSession, macRootKeyID, + ) + case hasAccounts: + // Alternative (2) above. + insertParams, err = paramsFromAccounts( + ctx, accountsDB, action, actAccounts, macRootKeyID) + default: + // Alternative (4) above. + insertParams = paramsFromAction(action, macRootKeyID) + } + if err != nil { + return fmt.Errorf("getting insert params failed: %w", err) + } + + // With the insert params ready, we can now insert the action + // into the SQL DB. + migratedActionID, err := sqlTx.InsertAction(ctx, insertParams) + if err != nil { + return fmt.Errorf( + "inserting action into SQL DB failed: %w", err, + ) + } + + // Finally, validate that the action was correctly migrated. + return validateMigratedAction( + ctx, sqlTx, sessionDB, action, insertParams, migratedActionID, + ) +} + +// validateMigratedAction validates that the migrated action in the SQL DB +// matches the original action in the KV DB. The function takes the original +// action, the insert params used to insert the action into the SQL DB, +// and the ID of the migrated action in the SQL DB. +func validateMigratedAction(ctx context.Context, sqlTx SQLQueries, + sessionDB session.SQLQueries, kvAction *Action, + insertParams sqlc.InsertActionParams, migratedActionID int64) error { + + // First, fetch the action back from the SQL DB. + migAction, err := getAndMarshalAction(ctx, sqlTx, migratedActionID) + if err != nil { + return fmt.Errorf("fetching migrated action with id %d from "+ + "SQL DB failed: %w", migratedActionID, err) + } + + // Before we compare the two actions, we need to override the + // time zone in the action. + overrideActionTimeZone(kvAction) + overrideActionTimeZone(migAction) + + var ( + overriddenSessID = fn.None[session.ID]() + overriddenAcctID = fn.None[accounts.AccountID]() + ) + + // As the original KVDB action does not persist session and account + // references correctly, we need to override them to the expected + // session and account IDs based on what the inserted SQL action's + // fields were set to. This is required in order to make the KVDB and + // SQL actions comparable. + if insertParams.SessionID.Valid { + sess, err := sessionDB.GetSessionByID( + ctx, insertParams.SessionID.Int64, + ) + if err != nil { + return fmt.Errorf("unable to get session with id %d: %w", + insertParams.SessionID.Int64, err) + } + + overriddenSessID = fn.Some(session.ID(sess.Alias)) + } + + if insertParams.AccountID.Valid { + acct, err := sessionDB.GetAccount( + ctx, insertParams.AccountID.Int64, + ) + if err != nil { + return fmt.Errorf("unable to get account with id %d: %w", + insertParams.AccountID.Int64, err) + } + + acctAlias, err := accounts.AccountIDFromInt64(acct.Alias) + if err != nil { + return fmt.Errorf("unable to get convert int64 "+ + "account alias to []byte form: %w", err) + } + + overriddenAcctID = fn.Some(acctAlias) + } + + overrideActionSessionAndAccount( + kvAction, overriddenSessID, overriddenAcctID, + ) + + // Finally, we need to override the macaroon ID field in the migrated + // SQL action, as the KVDB action only has the last 4 bytes set, while + // the SQL action has the full 8 bytes set. + overrideMacRootKeyID(migAction) + + // Now that we have overridden the fields that are expected to differ + // between the original KVDB action and the migrated SQL action, we can + // compare the two actions to ensure that they match. 
+ if !reflect.DeepEqual(kvAction, migAction) { + diff := difflib.UnifiedDiff{ + A: difflib.SplitLines( + spew.Sdump(kvAction), + ), + B: difflib.SplitLines( + spew.Sdump(migAction), + ), + FromFile: "Expected", + FromDate: "", + ToFile: "Actual", + ToDate: "", + Context: 3, + } + diffText, _ := difflib.GetUnifiedDiffString(diff) + + return fmt.Errorf("migrated action does not match original "+ + "action: \n%v", diffText) + } + + return nil +} + +// paramsFromBothSessionAndAccounts handles cases where both potential +// account(s) and session responsible for the action exists. In this case, +// we prioritize linking the action to the session. If the potential linked +// session is not a match for the action, we fall back to linking the action +// to the potential linked account with the earliest expiry (where accounts +// that do not expire is seen as the earliest). +func paramsFromBothSessionAndAccounts(ctx context.Context, + accountsDB accounts.SQLQueries, action *Action, actAccts []sqlc.Account, + sess sqlc.Session, macRootKeyID []byte) (sqlc.InsertActionParams, + error) { + + // Check if the potential linked session and account(s) could actually + // be responsible for the action, or if they should be filtered out. + sessOpt := getMatchingSessionForAction(action, sess) + acctOpt, err := getMatchingAccountForAction( + ctx, accountsDB, action, actAccts, + ) + if err != nil { + return sqlc.InsertActionParams{}, err + } + + switch { + case acctOpt.IsSome() && sessOpt.IsSome(): + // If we find both a potential linked account and session, we + // prio linking the session to the action. + return paramsFromSession(action, sess, macRootKeyID) + case acctOpt.IsSome(): + // If the session was filtered out, but we still have an + // account, we link the action to the account. + return paramsFromAccounts( + ctx, accountsDB, action, actAccts, macRootKeyID, + ) + case sessOpt.IsSome(): + return paramsFromSession(action, sess, macRootKeyID) + default: + // If no potential linked account or session were found after + // filtering, we won't link the action to any of them. + return paramsFromAction(action, macRootKeyID), nil + } +} + +// paramsFromSession returns the insert params for an action linked to a +// session. If the session is not a match for the action, the action will not be +// linked to the session. +func paramsFromSession(action *Action, actSess sqlc.Session, + macRootKeyID []byte) (sqlc.InsertActionParams, error) { + + sessOpt := getMatchingSessionForAction(action, actSess) + + params := paramsFromAction(action, macRootKeyID) + + sessOpt.WhenSome(func(sess sqlc.Session) { + params.SessionID = sqldb.SQLInt64(sess.ID) + params.AccountID = sess.AccountID + }) + + return params, nil +} + +// paramsFromAccounts returns the insert params for an action linked to an +// account. If no matching account is found for the action, the action will not +// be linked to any account. +func paramsFromAccounts(ctx context.Context, accountsDB accounts.SQLQueries, + action *Action, actAccts []sqlc.Account, + macRootKeyID []byte) (sqlc.InsertActionParams, error) { + + acctOpt, err := getMatchingAccountForAction( + ctx, accountsDB, action, actAccts, + ) + if err != nil { + return sqlc.InsertActionParams{}, err + } + + params := paramsFromAction(action, macRootKeyID) + + acctOpt.WhenSome(func(acct sqlc.Account) { + params.AccountID = sqldb.SQLInt64(acct.ID) + }) + + return params, nil +} + +// paramsFromAction returns the insert params for an action that is not linked +// to any account or session. 
+func paramsFromAction(action *Action, + macRootKeyID []byte) sqlc.InsertActionParams { + + return sqlc.InsertActionParams{ + MacaroonIdentifier: macRootKeyID, + ActorName: sqldb.SQLStr(action.ActorName), + FeatureName: sqldb.SQLStr(action.FeatureName), + ActionTrigger: sqldb.SQLStr(action.Trigger), + Intent: sqldb.SQLStr(action.Intent), + StructuredJsonData: []byte(action.StructuredJsonData), + RpcMethod: action.RPCMethod, + RpcParamsJson: action.RPCParamsJson, + CreatedAt: action.AttemptedAt, + ActionState: int16(action.State), + ErrorReason: sqldb.SQLStr(action.ErrorReason), + } +} + +// getMatchingSessionForAction checks if the potential linked session +// could actually be responsible for the action, or if it should be filtered +// out. +func getMatchingSessionForAction(action *Action, + sess sqlc.Session) fn.Option[sqlc.Session] { + + attempted := action.AttemptedAt + + // We filter out the session if the session could not have been + // responsible for the action, based on the action's attempted + // timestamp. + + // Exclude the session if it was revoked before the attempted-at time. + if sess.RevokedAt.Valid && sess.RevokedAt.Time.Before(attempted) { + return fn.None[sqlc.Session]() + } + // Exclude the session if it was created after the attempted-at time. + if sess.CreatedAt.After(attempted) { + return fn.None[sqlc.Session]() + } + // Exclude the session if it expired before the attempted-at time. + if sess.Expiry.Before(attempted) { + return fn.None[sqlc.Session]() + } + + // If we reach this point, the session is a potential match for + // the action. + return fn.Some(sess) +} + +// getMatchingAccountForAction checks if any of the potential linked account(s) +// could actually be responsible for the action, or if they should be +// filtered out. If multiple accounts remain after filtering, we pick the one +// with the earliest expiration, but where non-expiring accounts are picked +// first. We pick the earliest expiration because such accounts were more +// likely to have existed at the time of the action, as we have no way of +// tracking when the account was created. +func getMatchingAccountForAction(ctx context.Context, + accountsDB accounts.SQLQueries, action *Action, + actAccts []sqlc.Account) (fn.Option[sqlc.Account], error) { + + // sendMethods contains the RPC methods that trigger payments to be + // added to an account. We use this to filter out accounts that have no + // payments when the action is triggered by sending a payment. + var sendMethods = map[string]struct{}{ + "/lnrpc.Lightning/SendPayment": {}, + "/lnrpc.Lightning/SendPaymentSync": {}, + "/routerrpc.Router/SendPaymentV2": {}, + "/lnrpc.Lightning/SendToRoute": {}, + "/lnrpc.Lightning/SendToRouteSync": {}, + "/routerrpc.Router/SendToRouteV2": {}, + } + + // We cannot have an ActorName set for an action if the action was + // triggered by an account. + if action.ActorName != "" { + return fn.None[sqlc.Account](), nil + } + + attempted := action.AttemptedAt + + // 1) Do some initial filtering of the accounts. + filtered := make([]sqlc.Account, 0, len(actAccts)) + for _, a := range actAccts { + // Exclude the account if it expired before the attempted-at time.
+ if !a.Expiration.IsZero() && a.Expiration.Before(attempted) { + continue + } + + invoices, err := accountsDB.ListAccountInvoices(ctx, a.ID) + if err != nil && !errors.Is(err, sql.ErrNoRows) { + return fn.None[sqlc.Account](), fmt.Errorf("listing "+ + "invoices for account %d failed: %w", a.ID, err) + } + payments, err := accountsDB.ListAccountPayments(ctx, a.ID) + if err != nil && !errors.Is(err, sql.ErrNoRows) { + return fn.None[sqlc.Account](), fmt.Errorf("listing "+ + "payments for account %d failed: %w", a.ID, err) + } + + // Exclude the account if the action is triggered by creating + // an invoice, but the account has no invoices. + if action.RPCMethod == "/lnrpc.Lightning/AddInvoice" { + if len(invoices) == 0 { + continue + } + } + + // Exclude the account if the action is triggered by sending + // a payment, but the account has no payments. + if _, ok := sendMethods[action.RPCMethod]; ok { + if len(payments) == 0 { + continue + } + } + + filtered = append(filtered, a) + } + + // 2) If no accounts remain after filtering, no potential linked account + // for the action was found. + if len(filtered) == 0 { + return fn.None[sqlc.Account](), nil + } + + // 3) If multiple accounts remain after filtering, we pick the one with + // the earliest expiration, but where non expiring accounts are + // picked first. + if len(filtered) > 1 { + sort.Slice(filtered, func(i, j int) bool { + zeroI := filtered[i].Expiration.IsZero() + zeroJ := filtered[j].Expiration.IsZero() + + // If one is zero and the other is not, zero comes first + if zeroI && !zeroJ { + return true + } + if zeroJ && !zeroI { + return false + } + + // Else, both are zero or both are non-zero. If both are + // non-zero, we pick the earliest expiration first. + return filtered[i].Expiration.Before( + filtered[j].Expiration, + ) + }) + } + + // 4) Return the first account of the filtered list, which has been + // ordered if multiple accounts remain. + return fn.Some(filtered[0]), nil +} + +// getAndMarshalAction fetches an action by its ID from the SQL DB, and marshals +// it into the Action struct. +func getAndMarshalAction(ctx context.Context, sqlTx SQLQueries, id int64) ( + *Action, error) { + + // First, fetch the action back from the SQL DB. + dbAction, err := sqlTx.GetAction(ctx, id) + if errors.Is(err, sql.ErrNoRows) { + return nil, errors.New("action not found") + } else if err != nil { + return nil, err + } + + return marshalDBAction(ctx, sqlTx, dbAction) +} + +// marshalDBAction marshals a sqlc.Action into the Action struct. 
+func marshalDBAction(ctx context.Context, sqlTx SQLQueries, + dbAction sqlc.Action) (*Action, error) { + + var legacySessID fn.Option[session.ID] + if dbAction.SessionID.Valid { + legacySessIDB, err := sqlTx.GetAliasBySessionID( + ctx, dbAction.SessionID.Int64, + ) + if err != nil { + return nil, fmt.Errorf("unable to get legacy "+ + "session ID for session ID %d: %w", + dbAction.SessionID.Int64, err) + } + + sessID, err := session.IDFromBytes(legacySessIDB) + if err != nil { + return nil, err + } + + legacySessID = fn.Some(sessID) + } + + var legacyAcctID fn.Option[accounts.AccountID] + if dbAction.AccountID.Valid { + acct, err := sqlTx.GetAccount(ctx, dbAction.AccountID.Int64) + if err != nil { + return nil, err + } + + acctID, err := accounts.AccountIDFromInt64(acct.Alias) + if err != nil { + return nil, fmt.Errorf("unable to get account ID: %w", + err) + } + + legacyAcctID = fn.Some(acctID) + } + + // Note that we export the full 8 byte macaroon root key ID in the sql + // actions DB, while the kvdb version persists and exports only the + // last 4 bytes. + var macRootKeyID fn.Option[uint64] + if len(dbAction.MacaroonIdentifier) >= 8 { + macRootKeyID = fn.Some( + binary.BigEndian.Uint64(dbAction.MacaroonIdentifier), + ) + } + + return &Action{ + AddActionReq: AddActionReq{ + MacaroonRootKeyID: macRootKeyID, + AccountID: legacyAcctID, + SessionID: legacySessID, + ActorName: dbAction.ActorName.String, + FeatureName: dbAction.FeatureName.String, + Trigger: dbAction.ActionTrigger.String, + Intent: dbAction.Intent.String, + StructuredJsonData: string(dbAction.StructuredJsonData), + RPCMethod: dbAction.RpcMethod, + RPCParamsJson: dbAction.RpcParamsJson, + }, + AttemptedAt: dbAction.CreatedAt, + State: ActionState(dbAction.ActionState), + ErrorReason: dbAction.ErrorReason.String, + }, nil +} + +// mapMacIds maps the macaroon root key IDs by their 4 byte suffix to make it +// easy to look up the full root key ID for each action based on the macaroon +// identifier (which is the last 4 bytes of the root key ID). +// The function returns a map where the key is the 4 byte suffix, and the +// value is the full root key ID. +func mapMacIds(macRootKeyIDs [][]byte) (map[[4]byte][]byte, error) { + // Start by converting the macRootKeyIDs to a map that lets us map a + // macaroon's 4 byte identifier to its full root key ID. + macMap := make(map[[4]byte][]byte) + + for _, id := range macRootKeyIDs { + if len(id) < 4 { + return nil, fmt.Errorf("expected rootKeyID to be at "+ + "least 4 bytes long, got %d bytes", len(id)) + } + + // Extract the last 4 bytes of the root key ID to use as the + // key in the map. + var rootKeyShortID [4]byte + copy(rootKeyShortID[:], id[len(id)-4:]) + + // NOTE: If we already have an entry for this rootKeyShortID, + // we overwrite it with the new RootKeyID, as we can't determine + // which one is the correct one. + macMap[rootKeyShortID] = id + } + + return macMap, nil +} + +// mapAccounts maps the accounts by the 4 byte prefix of their Alias to make +// it easy to look up any potential linked account(s) for each action based +// on the macaroon identifier (which is the last 4 bytes of the root key ID). +// The function returns a map where the key is the 4 byte account prefix, and +// the value is a list of accounts that match that prefix.
+func mapAccounts(accts []sqlc.Account) (map[[4]byte][]sqlc.Account, error) { + acctMap := make(map[[4]byte][]sqlc.Account) + + for _, acct := range accts { + aliasBytes := make([]byte, 8) + + // Convert the int64 account Alias to bytes (big-endian). + binary.BigEndian.PutUint64(aliasBytes, uint64(acct.Alias)) + + var acctPrefix [4]byte + copy(acctPrefix[:], aliasBytes[:4]) + + if acctList, ok := acctMap[acctPrefix]; ok { + acctMap[acctPrefix] = append(acctList, acct) + } else { + acctMap[acctPrefix] = []sqlc.Account{acct} + } + } + + return acctMap, nil +} + +// mapSessions maps the sessions by their 4 byte Alias, to make it easy to +// look up any potential linked session for each action based on the macaroon +// identifier (which is the last 4 bytes of the root key ID). +// The function returns a map where the key is the 4 byte Alias, and the +// value is the corresponding session. +func mapSessions(sessions []sqlc.Session) (map[[4]byte]sqlc.Session, error) { + sessMap := make(map[[4]byte]sqlc.Session) + + for _, sess := range sessions { + if len(sess.Alias) != 4 { + return nil, fmt.Errorf("session alias must be 4 "+ + "bytes, got %d bytes", len(sess.Alias)) + } + + var sessAlias [4]byte + copy(sessAlias[:], sess.Alias[:4]) + + if _, ok := sessMap[sessAlias]; ok { + // NOTE: This should be unreachable, as we shouldn't + // have multiple sessions with the same Alias, as the + // sessions store has already been migrated to SQL here, + // and the session's table has a UNIQUE constraint on + // the Alias column. + return nil, fmt.Errorf("shouldn't have multiple "+ + "sessions with the same alias %x", sessAlias) + } else { + sessMap[sessAlias] = sess + } + } + + return sessMap, nil +} + +// overrideActionTimeZone overrides the time zone of the action to the local +// time zone and chops off the nanosecond part for comparison. This is needed +// because KV database stores times as-is which as an unwanted side effect would +// fail migration due to time comparison expecting both the original and +// migrated actions to be in the same local time zone and in microsecond +// precision. Note that PostgresSQL stores times in microsecond precision while +// SQLite can store times in nanosecond precision if using TEXT storage class. +func overrideActionTimeZone(action *Action) { + fixTime := func(t time.Time) time.Time { + return t.In(time.Local).Truncate(time.Microsecond) + } + + if !action.AttemptedAt.IsZero() { + action.AttemptedAt = fixTime(action.AttemptedAt) + } +} + +// overrideActionSessionAndAccount overrides the session and account IDs of the +// action to the provided values. +func overrideActionSessionAndAccount(action *Action, + sessID fn.Option[session.ID], acctID fn.Option[accounts.AccountID]) { + + action.SessionID = sessID + action.AccountID = acctID +} + +// overrideMacRootKeyID overrides the MacaroonRootKeyID of the action to only +// contain the last 4 bytes (least significant 32 bits) of the original value. +// The first 4 bytes are set to zeroes. +// This is needed because the KV database only persists the last 4 bytes of the +// root key ID, while the SQL database persists the full 8 bytes. +func overrideMacRootKeyID(action *Action) { + action.MacaroonRootKeyID.WhenSome(func(macID uint64) { + // Extract only the last 32 bits (least significant 4 bytes). 
+ last32 := macID & 0xFFFFFFFF + + action.MacaroonRootKeyID = fn.Some(last32) + }) +} diff --git a/firewalldb/sql_migration_test.go b/firewalldb/sql_migration_test.go index b375c4bc5..8e95097a4 100644 --- a/firewalldb/sql_migration_test.go +++ b/firewalldb/sql_migration_test.go @@ -5,6 +5,7 @@ import ( "context" "database/sql" "encoding/binary" + "encoding/json" "errors" "fmt" "testing" @@ -16,24 +17,45 @@ import ( "github.com/lightninglabs/lightning-terminal/session" "github.com/lightningnetwork/lnd/clock" "github.com/lightningnetwork/lnd/fn" + "github.com/lightningnetwork/lnd/macaroons" "github.com/lightningnetwork/lnd/sqldb" "github.com/stretchr/testify/require" + "go.etcd.io/bbolt" "golang.org/x/exp/rand" + "gopkg.in/macaroon-bakery.v2/bakery/checkers" + "gopkg.in/macaroon.v2" ) const ( - testRuleName = "test-rule" - testRuleName2 = "test-rule-2" - testFeatureName = "test-feature" - testFeatureName2 = "test-feature-2" - testEntryKey = "test-entry-key" - testEntryKey2 = "test-entry-key-2" - testEntryKey3 = "test-entry-key-3" - testEntryKey4 = "test-entry-key-4" + testRuleName = "test-rule" + testRuleName2 = "test-rule-2" + testFeatureName = "test-feature" + testFeatureName2 = "test-feature-2" + testEntryKey = "test-entry-key" + testEntryKey2 = "test-entry-key-2" + testEntryKey3 = "test-entry-key-3" + testEntryKey4 = "test-entry-key-4" + testSessionName = "test-session" + testServerAddress = "foo.bar.baz:1234" + testActorName = "test-actor" + testTrigger = "test-trigger" + testIntent = "test-intent" + testStructuredJsonData = "{\"test\":\"data\"}" + testRPCMethod = "Test.Method" + testRPCParamsJson = "{\"test\":\"data\"}" ) var ( testEntryValue = []byte{1, 2, 3} + testActionReq = AddActionReq{ + ActorName: "", + FeatureName: testFeatureName, + Trigger: testTrigger, + Intent: testIntent, + StructuredJsonData: testStructuredJsonData, + RPCMethod: testRPCMethod, + RPCParamsJson: []byte(testRPCParamsJson), + } ) // rootKeyMockStore is a mock implementation of a macaroon service store that @@ -317,6 +339,33 @@ func TestFirewallDBMigration(t *testing.T) { require.Equal(t, totalExpectedPairs, totalPairs) } + // assertActionsMigrationResults asserts that the migrated actions in + // the SQLDB match the original expected actions. It also asserts that + // the SQL DB does not contain any other actions than the expected ones. + assertActionsMigrationResults := func(t *testing.T, sqlStore *SQLDB, + expectedActions []*Action) { + + // First assert that the SQLDB contains the expected number of + // actions. + dbActions, _, _, err := sqlStore.ListActions( + ctx, &ListActionsQuery{}, + ) + require.NoError(t, err) + + require.Equal(t, len(expectedActions), len(dbActions)) + if len(expectedActions) == 0 { + return + } + + // Then assert that the actions in the SQLDB match the + // expected actions. + for i, migratedAction := range dbActions { + expAction := expectedActions[i] + + assertEqualActions(t, expAction, migratedAction) + } + } + // The assertMigrationResults asserts that the migrated entries in the // firewall SQLDB match the expected results which should represent the // original entries in the BoltDB. 
@@ -332,6 +381,8 @@ func TestFirewallDBMigration(t *testing.T) { assertPrivacyMapperMigrationResults( t, sqlStore, expRes.privPairs, ) + + assertActionsMigrationResults(t, sqlStore, expRes.actions) } // The tests slice contains all the tests that we will run for the @@ -395,6 +446,46 @@ func TestFirewallDBMigration(t *testing.T) { name: "random privacy pairs", populateDB: randomPrivacyPairs, }, + { + name: "action with no session or account", + populateDB: actionNoSessionOrAccount, + }, + { + name: "action with session but no account", + populateDB: actionWithSessionNoAccount, + }, + { + name: "action with filtered session", + populateDB: actionsWithFilteredSession, + }, + { + name: "action with session with linked account", + populateDB: actionWithSessionWithLinkedAccount, + }, + { + name: "action with account", + populateDB: actionWithAccount, + }, + { + name: "actions with filtered account", + populateDB: actionsWithFilteredAccount, + }, + { + name: "action with multiple accounts", + populateDB: actionWithMultipleAccounts, + }, + { + name: "action with session and account", + populateDB: actionWithSessionAndAccount, + }, + { + name: "action with session with linked account and account", + populateDB: actionWithSessionWithLinkedAccountAndAccount, + }, + { + name: "random actions", + populateDB: randomActions, + }, { name: "random firewalldb entries", populateDB: randomFirewallDBEntries, @@ -417,9 +508,14 @@ func TestFirewallDBMigration(t *testing.T) { // the sql version of the kv stores that we'll create // in test, without also needing to migrate it. accountStore := accounts.NewTestDB(t, clock) + acctSQLStore, ok := accountStore.(*accounts.SQLStore) + require.True(t, ok) + sessionsStore := session.NewTestDBWithAccounts( t, clock, accountStore, ) + sessSQLStore, ok := sessionsStore.(*session.SQLStore) + require.True(t, ok) // Create a new firewall store to populate with test // data. @@ -449,6 +545,8 @@ func TestFirewallDBMigration(t *testing.T) { func(tx SQLQueries) error { return MigrateFirewallDBToSQL( ctx, firewallStore.DB, tx, + acctSQLStore, sessSQLStore, + rootKeyStore.getAllRootKeys(), ) }, ) @@ -963,11 +1061,667 @@ func randomPrivacyPairs(t *testing.T, ctx context.Context, } } +// actionNoSessionOrAccount adds an action which is not linked to any session or +// account. +func actionNoSessionOrAccount(t *testing.T, ctx context.Context, + boltDB *BoltDB, _ session.Store, _ accounts.Store, + rStore *rootKeyMockStore) *expectedResult { + + // As the action is not linked to any session, we add a random root + // key which we use as the macaroon identifier for the action. + // This simulates how similar actions would have been created in + // production. + rootKey := rStore.addRandomRootKey() + + actionReq := testActionReq + actionReq.MacaroonRootKeyID = fn.Some(rootKey) + actionReq.SessionID = fn.None[session.ID]() + actionReq.AccountID = fn.None[accounts.AccountID]() + + action := addAction(t, ctx, boltDB, &actionReq) + + return &expectedResult{ + kvEntries: []*kvEntry{}, + privPairs: make(privacyPairs), + actions: []*Action{action}, + } +} + +// actionWithSessionNoAccount adds an action which is linked a session but no +// account. +func actionWithSessionNoAccount(t *testing.T, ctx context.Context, + boltDB *BoltDB, sessStore session.Store, _ accounts.Store, + rStore *rootKeyMockStore) *expectedResult { + + // Create the session that we will link the action to. 
+ sess := testSession(t, ctx, sessStore) + + // To simulate that the action was created with a macaroon identifier + // that matches the session ID prefix, we add a root key with an ID + // that matches the session ID prefix. + rootKey := rStore.addRootKeyFromIDSuffix(sess.ID) + + actionReq := testActionReq + actionReq.MacaroonRootKeyID = fn.Some(rootKey) + // Link the action to the session, but no account. + actionReq.SessionID = fn.Some(sess.ID) + actionReq.AccountID = fn.None[accounts.AccountID]() + + action := addAction(t, ctx, boltDB, &actionReq) + + return &expectedResult{ + kvEntries: []*kvEntry{}, + privPairs: make(privacyPairs), + actions: []*Action{action}, + } +} + +// actionsWithFilteredSession adds actions where a matching session ID do exist, +// but where that session wasn't active at the time of the action event and +// therefore couldn't have been linked to the action. Such sessions are filtered +// out during the migration. +func actionsWithFilteredSession(t *testing.T, ctx context.Context, + boltDB *BoltDB, sessStore session.Store, _ accounts.Store, + rStore *rootKeyMockStore) *expectedResult { + + var actions []*Action + + // addActionFromReq is a helper function that adds an action from the + // passed request, and appends the added action to the actions slice. + addActionFromReq := func(req AddActionReq) { + actions = append(actions, addAction(t, ctx, boltDB, &req)) + } + + // First, we add an already expired session, as this should be filtered + // out during the action migration. + sess1 := testSessionWithExpiry( + t, ctx, sessStore, time.Now().Add(-time.Hour), + ) + + // Ensure that the root key ID that's used during the action creation + // does match the session ID prefix, to simulate that a collision did + // occur with for the root key ID with an already existing session. + rootKey1 := rStore.addRootKeyFromIDSuffix(sess1.ID) + + actionReq1 := testActionReq + actionReq1.MacaroonRootKeyID = fn.Some(rootKey1) + // However, as the session wasn't active at the time of the action + // creation, we don't link the session as the action wasn't linked to + // the session when it was created. + actionReq1.SessionID = fn.None[session.ID]() + actionReq1.AccountID = fn.None[accounts.AccountID]() + + addActionFromReq(actionReq1) + + // Next, we add a session that was revoked at the time of the action, + // and therefore couldn't be the intended session for the action. + sess2 := testSession(t, ctx, sessStore) + + // Revoke the session. + err := sessStore.ShiftState(ctx, sess2.ID, session.StateCreated) + require.NoError(t, err) + err = sessStore.ShiftState(ctx, sess2.ID, session.StateRevoked) + require.NoError(t, err) + + rootKey2 := rStore.addRootKeyFromIDSuffix(sess2.ID) + actionReq2 := testActionReq + actionReq2.MacaroonRootKeyID = fn.Some(rootKey2) + actionReq2.SessionID = fn.None[session.ID]() + actionReq2.AccountID = fn.None[accounts.AccountID]() + + addActionFromReq(actionReq2) + + return &expectedResult{ + kvEntries: []*kvEntry{}, + privPairs: make(privacyPairs), + actions: actions, + } +} + +// actionWithSessionWithLinkedAccount adds an action which is linked a session +// where the action itself is linked to an account. +func actionWithSessionWithLinkedAccount(t *testing.T, ctx context.Context, + boltDB *BoltDB, sessStore session.Store, acctStore accounts.Store, + rStore *rootKeyMockStore) *expectedResult { + + // Add a session with a linked account. 
+	sess, acct, _ := testSessionWithAccount(
+		t, ctx, sessStore, acctStore,
+	)
+
+	rootKey := rStore.addRootKeyFromIDSuffix(sess.ID)
+	_ = rStore.addRootKeyFromAcctID(acct.ID)
+
+	actionReq := testActionReq
+	actionReq.MacaroonRootKeyID = fn.Some(rootKey)
+	// As the session the action is linked to does have a linked account,
+	// we also link the action to the account.
+	actionReq.SessionID = fn.Some(sess.ID)
+	actionReq.AccountID = fn.Some(acct.ID)
+
+	action := addAction(t, ctx, boltDB, &actionReq)
+
+	return &expectedResult{
+		kvEntries: []*kvEntry{},
+		privPairs: make(privacyPairs),
+		actions:   []*Action{action},
+	}
+}
+
+// actionWithAccount adds an action which is linked to an account but no
+// session.
+func actionWithAccount(t *testing.T, ctx context.Context,
+	boltDB *BoltDB, _ session.Store, acctStore accounts.Store,
+	rStore *rootKeyMockStore) *expectedResult {
+
+	// Create the account that we will link the action to.
+	acct, _ := testAccount(t, ctx, acctStore)
+
+	// In production, the root key of the macaroon used when an account
+	// event triggers an action creation will start with the first 4 bytes
+	// of the account ID. We therefore simulate that here by adding a root
+	// key with an ID that matches the account ID prefix.
+	rootKey := rStore.addRootKeyFromAcctID(acct.ID)
+
+	actionReq := testActionReq
+	actionReq.MacaroonRootKeyID = fn.Some(rootKey)
+	// Link the action to the account, but no session.
+	actionReq.SessionID = fn.None[session.ID]()
+	actionReq.AccountID = fn.Some(acct.ID)
+
+	action := addAction(t, ctx, boltDB, &actionReq)
+
+	return &expectedResult{
+		kvEntries: []*kvEntry{},
+		privPairs: make(privacyPairs),
+		actions:   []*Action{action},
+	}
+}
+
+// actionsWithFilteredAccount adds actions whose macaroon identifier does
+// match an account, but where that account couldn't have been the account
+// that triggered the action creation. Such accounts are filtered out during
+// the migration, and are not linked to the migrated action.
+func actionsWithFilteredAccount(t *testing.T, ctx context.Context,
+	boltDB *BoltDB, _ session.Store, acctStore accounts.Store,
+	rStore *rootKeyMockStore) *expectedResult {
+
+	var actions []*Action
+
+	// addActionFromReq is a helper function that adds an action from the
+	// passed request, and appends the added action to the actions slice.
+	addActionFromReq := func(req AddActionReq) {
+		actions = append(actions, addAction(t, ctx, boltDB, &req))
+	}
+
+	// First, we add an already expired account, as this should be filtered
+	// out during the action migration.
+	acct1, _ := testAccountWithExpiry(
+		t, ctx, acctStore, time.Now().Add(-time.Hour),
+	)
+
+	// Ensure that the root key ID that's used during the action creation
+	// does match the account ID prefix, to simulate that a collision did
+	// occur between the root key ID and an already existing account.
+	rootKey1 := rStore.addRootKeyFromAcctID(acct1.ID)
+
+	actionReq1 := testActionReq
+	actionReq1.MacaroonRootKeyID = fn.Some(rootKey1)
+	actionReq1.SessionID = fn.None[session.ID]()
+	// The action doesn't link to any account, as the action wasn't
+	// intended for the account when it was created.
+	actionReq1.AccountID = fn.None[accounts.AccountID]()
+
+	addActionFromReq(actionReq1)
+
+	// Next, we add an account that was active at the time of the action,
+	// but where the action itself had an actor set. This should not be
+	// possible if the action was triggered by an account event, and the
+	// account should therefore be filtered out during the migration.
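+	// Note that this account is created with a 24 hour expiry and is
+	// therefore not expired; it is only the actor name set below that
+	// causes it to be filtered out.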
+ acct2, _ := testAccount(t, ctx, acctStore) + + rootKey2 := rStore.addRootKeyFromAcctID(acct2.ID) + + actionReq2 := testActionReq + actionReq2.ActorName = testActorName + actionReq2.MacaroonRootKeyID = fn.Some(rootKey2) + actionReq2.SessionID = fn.None[session.ID]() + actionReq2.AccountID = fn.None[accounts.AccountID]() + + addActionFromReq(actionReq2) + + // Lastly, if an action is connected to an RPC endpoint which is either + // a payment or creation of an invoice, but the account that collides + // action's macaroon identifier doesn't have any payments or invoices, + // that account couldn't have been the trigger for the action. + acct3, _ := testAccount(t, ctx, acctStore) + + rootKey3 := rStore.addRootKeyFromAcctID(acct3.ID) + + actionReq3 := testActionReq + actionReq3.RPCMethod = "/routerrpc.Router/SendPaymentV2" + actionReq3.MacaroonRootKeyID = fn.Some(rootKey3) + actionReq3.SessionID = fn.None[session.ID]() + actionReq3.AccountID = fn.None[accounts.AccountID]() + + addActionFromReq(actionReq3) + + acct4, _ := testAccount(t, ctx, acctStore) + + rootKey4 := rStore.addRootKeyFromAcctID(acct4.ID) + + actionReq4 := testActionReq + actionReq4.RPCMethod = "/lnrpc.Lightning/AddInvoice" + actionReq4.MacaroonRootKeyID = fn.Some(rootKey4) + actionReq4.SessionID = fn.None[session.ID]() + actionReq4.AccountID = fn.None[accounts.AccountID]() + + addActionFromReq(actionReq4) + + return &expectedResult{ + kvEntries: []*kvEntry{}, + privPairs: make(privacyPairs), + actions: actions, + } +} + +// actionWithMultipleAccounts adds an action where the short macaroon RootKeyID +// collides with multiple different accounts. This test ensures that only one of +// the accounts gets linked, given the filtration rules that are applied during +// the migration. +func actionWithMultipleAccounts(t *testing.T, ctx context.Context, + boltDB *BoltDB, _ session.Store, acctStore accounts.Store, + rStore *rootKeyMockStore) *expectedResult { + + // Create two accounts with colliding prefixes, which expires at + // different times. + acct1, _ := testAccountWithExpiry( + t, ctx, acctStore, time.Now().Add(time.Hour*48), + ) + _, acctID2 := testAccountWithExpiry( + t, ctx, acctStore, time.Now().Add(time.Hour*24), + ) + + acctSqlStore, ok := acctStore.(*accounts.SQLStore) + require.True(t, ok) + + // To ensure that the two accounts do collide, we modify the alias + // of the second account to match the first 4 bytes of acct1's ID. + var newAcctAlias [8]byte + copy(newAcctAlias[:4], acct1.ID[:4]) + copy(newAcctAlias[4:], randomBytes(4)) + + newAcct2ID := accounts.AccountID(newAcctAlias) + acctAlias, err := newAcct2ID.ToInt64() + require.NoError(t, err) + + _, err = acctSqlStore.UpdateAccountAliasForTests( + ctx, sqlc.UpdateAccountAliasForTestsParams{ + Alias: acctAlias, + ID: acctID2, + }, + ) + require.NoError(t, err) + + // Mock the root keys for both accounts. + _ = rStore.addRootKeyFromAcctID(acct1.ID) + rootKey := rStore.addRootKeyFromAcctID(newAcct2ID) + + actionReq := testActionReq + actionReq.MacaroonRootKeyID = fn.Some(rootKey) + actionReq.SessionID = fn.None[session.ID]() + // When two colliding accounts exist, the account with the earliest + // expiry should be linked to the action. In our case, that's acct2. 
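+	// Note that we reference acct2 by its rewritten alias (newAcct2ID),
+	// whose first 4 bytes equal acct1's prefix and therefore the short ID
+	// that the action's macaroon root key resolves to.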
+	actionReq.AccountID = fn.Some(newAcct2ID)
+
+	action := addAction(t, ctx, boltDB, &actionReq)
+
+	return &expectedResult{
+		kvEntries: []*kvEntry{},
+		privPairs: make(privacyPairs),
+		actions:   []*Action{action},
+	}
+}
+
+// actionWithSessionAndAccount adds an action where both a session and an
+// account exist with IDs that collide with the action's macaroon RootKeyID.
+// This test ensures that the action is linked to the session, since sessions
+// take precedence over accounts during the migration.
+func actionWithSessionAndAccount(t *testing.T, ctx context.Context,
+	boltDB *BoltDB, sessStore session.Store, acctStore accounts.Store,
+	rStore *rootKeyMockStore) *expectedResult {
+
+	// Create a session and an account that will collide.
+	sess := testSession(t, ctx, sessStore)
+	_, acctID := testAccount(t, ctx, acctStore)
+
+	acctSqlStore, ok := acctStore.(*accounts.SQLStore)
+	require.True(t, ok)
+
+	// Modify the first 4 bytes of the account alias to match the session
+	// ID, to ensure that they collide.
+	var newAcctAlias [8]byte
+	copy(newAcctAlias[:4], sess.ID[:])
+	copy(newAcctAlias[4:], randomBytes(4))
+
+	acctAlias, err := accounts.AccountID(newAcctAlias).ToInt64()
+	require.NoError(t, err)
+
+	_, err = acctSqlStore.UpdateAccountAliasForTests(
+		ctx, sqlc.UpdateAccountAliasForTestsParams{
+			Alias: acctAlias,
+			ID:    acctID,
+		},
+	)
+	require.NoError(t, err)
+
+	// Note that we add the "session's" root key ID after we have added the
+	// root key for newAcctAlias. During the migration, if two or more root
+	// keys exist that have a colliding 4 byte short ID, the last added
+	// root key will be chosen, as it's not possible to determine which
+	// root key was actually used when creating the action. I.e. if the
+	// root key ID for newAcctAlias was added last, that root key would be
+	// chosen during the migration. This doesn't change whether the action
+	// gets linked to the session or the account, but for extra correctness
+	// we ensure that the session's root key is added last and is therefore
+	// the one used.
+	_ = rStore.addRootKeyFromAcctID(newAcctAlias)
+	rootKey := rStore.addRootKeyFromIDSuffix(sess.ID)
+
+	actionReq := testActionReq
+	actionReq.MacaroonRootKeyID = fn.Some(rootKey)
+	// As the session takes precedence over the account, we expect the
+	// action to be linked to the session only.
+	actionReq.SessionID = fn.Some(sess.ID)
+	actionReq.AccountID = fn.None[accounts.AccountID]()
+
+	action := addAction(t, ctx, boltDB, &actionReq)
+
+	return &expectedResult{
+		kvEntries: []*kvEntry{},
+		privPairs: make(privacyPairs),
+		actions:   []*Action{action},
+	}
+}
+
+// actionWithSessionWithLinkedAccountAndAccount adds an action linked to a
+// session (that is itself linked to an account) and where another account
+// collides with the action's macaroon RootKeyID.
+// In this scenario, the session should take precedence over the separate
+// existing account. As that session is itself linked to an account, the
+// action should therefore be linked to both that session and that session's
+// account.
+func actionWithSessionWithLinkedAccountAndAccount(t *testing.T,
+	ctx context.Context, boltDB *BoltDB, sessStore session.Store,
+	acctStore accounts.Store, rStore *rootKeyMockStore) *expectedResult {
+
+	// Create a session with a linked account.
+	sess, acct1, _ := testSessionWithAccount(
+		t, ctx, sessStore, acctStore,
+	)
+	// Also create another account that will collide with the action.
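+	// We only need this account's SQL ID, as its alias is rewritten
+	// below so that its first 4 bytes match the session ID.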
+ _, acct2ID := testAccount(t, ctx, acctStore) + + acctSqlStore, ok := acctStore.(*accounts.SQLStore) + require.True(t, ok) + + // Modify the first 4 bytes of the second account alias to match the + // session ID, to ensure that they collide. + var newAcct2Alias [8]byte + copy(newAcct2Alias[:4], sess.ID[:]) + copy(newAcct2Alias[4:], randomBytes(4)) + + acctAlias, err := accounts.AccountID(newAcct2Alias).ToInt64() + require.NoError(t, err) + + _, err = acctSqlStore.UpdateAccountAliasForTests( + ctx, sqlc.UpdateAccountAliasForTestsParams{ + Alias: acctAlias, + ID: acct2ID, + }, + ) + require.NoError(t, err) + + // Note that we set add the "session's" root key ID after we have added + // the root key for newAcct2Alias. During the migration, if two or more + // root keys exist that have a colliding 4 byte short ID, the last added + // root key will be chosen, as it's not possible to determine which root + // key was actually used when creating the action. I.e. if the root key + // ID for newAcct2Alias was added last, that root key would be chosen + // during the migration. This doesn't change if the action gets linked + // to the session or the account though, but just for extra correctness + // we ensure that the session's root key is added last and is therefore + // used. + _ = rStore.addRootKeyFromAcctID(acct1.ID) + _ = rStore.addRootKeyFromAcctID(newAcct2Alias) + rootKey := rStore.addRootKeyFromIDSuffix(sess.ID) + + actionReq := testActionReq + actionReq.MacaroonRootKeyID = fn.Some(rootKey) + // Link the action to the session and the session's linked account, as + // the session takes precedence over acct2. + actionReq.SessionID = fn.Some(sess.ID) + actionReq.AccountID = fn.Some(acct1.ID) + + action := addAction(t, ctx, boltDB, &actionReq) + + return &expectedResult{ + kvEntries: []*kvEntry{}, + privPairs: make(privacyPairs), + actions: []*Action{action}, + } +} + +// randomActions creates 1000 actions, which properties are random. +func randomActions(t *testing.T, ctx context.Context, boltDB *BoltDB, + sessStore session.Store, acctStore accounts.Store, + rStore *rootKeyMockStore) *expectedResult { + + var actions []*Action + + numActions := 1000 + acctSqlStore, ok := acctStore.(*accounts.SQLStore) + require.True(t, ok) + + for i := 0; i < numActions; i++ { + rJson, err := randomJSON(rand.Intn(20)) + require.NoError(t, err) + + actionReq := AddActionReq{ + ActorName: "", + FeatureName: randomString(rand.Intn(20)), + Trigger: randomString(rand.Intn(20)), + Intent: randomString(rand.Intn(20)), + StructuredJsonData: rJson, + RPCMethod: randomRPCMethod(), + RPCParamsJson: []byte(rJson), + MacaroonRootKeyID: fn.None[uint64](), + SessionID: fn.None[session.ID](), + AccountID: fn.None[accounts.AccountID](), + } + + // 1) 50% of the time, we create a session that may be linked to + // the action. + if rand.Intn(2) == 0 { + switch rand.Intn(3) { + // In 1/3 of the cases, we create a session and no + // account that is linked to the action. + case 0: + sess := testSession(t, ctx, sessStore) + + rootKey := rStore.addRootKeyFromIDSuffix( + sess.ID, + ) + actionReq.MacaroonRootKeyID = fn.Some(rootKey) + actionReq.SessionID = fn.Some(sess.ID) + + // In 50% of these cases, we also set an actor + // name to simulate how an action triggered by + // the autopilot would look like in production. 
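+				// Note that setting an actor name does not
+				// prevent the session itself from being
+				// linked; the actor check only applies when
+				// linking accounts during the migration.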
+				if rand.Intn(2) == 0 {
+					actionReq.ActorName = randomString(
+						rand.Intn(10) + 1,
+					)
+				}
+
+			// In 1/3 of the cases, we create a session which will
+			// be filtered out during the migration, and therefore
+			// not be linked to the action.
+			case 1:
+				sess := randFilteredSession(t, ctx, sessStore)
+
+				// We still set the actionReq.MacaroonRootKeyID
+				// to simulate a collision with the session ID,
+				// but we don't set the actionReq.SessionID, as
+				// the action wasn't actually linked to the
+				// session.
+				actionReq.MacaroonRootKeyID = fn.Some(
+					rStore.addRootKeyFromIDSuffix(sess.ID),
+				)
+
+			// In 1/3 of the cases, we create a session with a
+			// linked account, and link both to the action.
+			case 2:
+				sess, acct, _ := testSessionWithAccount(
+					t, ctx, sessStore, acctStore,
+				)
+
+				actionReq.MacaroonRootKeyID = fn.Some(
+					rStore.addRootKeyFromIDSuffix(sess.ID),
+				)
+				_ = rStore.addRootKeyFromAcctID(acct.ID)
+
+				actionReq.SessionID = fn.Some(sess.ID)
+				actionReq.AccountID = fn.Some(acct.ID)
+			}
+		}
+
+		// 2) 50% of the time, we create one or more accounts that may
+		// be linked to the action.
+		if rand.Intn(2) == 0 {
+			for i := 1; i <= rand.Intn(5)+1; i++ {
+				var (
+					acct   *accounts.OffChainBalanceAccount
+					acctID int64
+
+					// To ensure that the earliest expiring
+					// account created in the loop is the
+					// one that may be linked to the action,
+					// this new account expires later than
+					// any previously created account in the
+					// loop. The account will not be linked
+					// if another account has already been
+					// linked to the action.
+					expiry = time.Now().Add(
+						time.Hour * time.Duration(i*24),
+					)
+				)
+
+				// In 50% of the cases, we create an expired
+				// account that will be filtered out during the
+				// migration though.
+				expired := rand.Intn(2) == 0
+				if expired {
+					expiry = time.Now().Add(-time.Hour)
+				}
+
+				acct, acctID = testAccountWithExpiry(
+					t, ctx, acctStore, expiry,
+				)
+
+				// If the action doesn't already have a
+				// MacaroonIdentifier set, we set it to a root
+				// key that matches the account ID.
+				if actionReq.MacaroonRootKeyID.IsNone() {
+					actionReq.MacaroonRootKeyID = fn.Some(
+						rStore.addRootKeyFromAcctID(
+							acct.ID,
+						),
+					)
+				} else {
+					// Else we modify the account ID so
+					// that it collides with the existing
+					// actionReq.MacaroonIdentifier.
+					rootKey := actionReq.MacaroonId()
+
+					var newAcctAlias [8]byte
+					copy(newAcctAlias[:4], rootKey[:])
+					copy(newAcctAlias[4:], randomBytes(4))
+
+					newAcctID := accounts.AccountID(
+						newAcctAlias,
+					)
+					acctAlias, err := newAcctID.ToInt64()
+					require.NoError(t, err)
+
+					_, err = acctSqlStore.UpdateAccountAliasForTests(
+						ctx, sqlc.UpdateAccountAliasForTestsParams{
+							Alias: acctAlias,
+							ID:    acctID,
+						},
+					)
+					require.NoError(t, err)
+
+					acct.ID = newAcctID
+				}
+
+				// We link the account to the action if it
+				// isn't expired, and if neither a session nor
+				// an account has already been set for the
+				// action. When a session has been set, the
+				// session takes precedence over accounts, so
+				// we don't link the account. If an account has
+				// already been set, it will expire earlier
+				// than the current account, and therefore
+				// takes precedence.
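+				// In short: we link this account only if it
+				// is the first non-expired account in the
+				// loop and no session was linked above.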
+ if actionReq.SessionID.IsNone() && !expired && + actionReq.AccountID.IsNone() { + + actionReq.AccountID = fn.Some(acct.ID) + } + } + + // In 25% of the cases, we modify the actionReq to + // simulate that the action was created in a way that + // makes it impossible to have been triggered by an + // account event, and therefore the account(s) should + // be filtered out. + // Note that we only do this if no session is set, as + // if the session did have a linked account, that + // session will have precedence and link the action to + // its account. Such an action must therefore have been + // triggered by an account event, and filtering out the + // account in that scenario doesn't make sense. + if actionReq.SessionID.IsNone() && rand.Intn(4) == 0 { + actionReq = randAcctFilteringReq(actionReq) + } + } + + // 3) If the action doesn't have a MacaroonIdentifier yet, that + // means no session or account was created for the action. + // In that scenario, we create a random root key to use as + // the MacaroonIdentifier, to simulate an action that was + // created without any session or account linked to it. + if actionReq.MacaroonRootKeyID.IsNone() { + actionReq.MacaroonRootKeyID = fn.Some( + rStore.addRandomRootKey(), + ) + } + + // 4) Set the actions session and account IDs to match what we + // expect the migrated action to look like. + action := addAction(t, ctx, boltDB, &actionReq) + + // Append the action to the list of expected actions. + actions = append(actions, action) + } + + return &expectedResult{ + kvEntries: []*kvEntry{}, + privPairs: make(privacyPairs), + actions: actions, + } +} + // randomFirewallDBEntries populates the firewalldb with random entries for all // types entries that are currently supported in the firewalldb. -// -// TODO(viktor): Extend this function to also populate it with random action -// entries, once the actions migration has been implemented. func randomFirewallDBEntries(t *testing.T, ctx context.Context, boltDB *BoltDB, sessionStore session.Store, acctStore accounts.Store, rStore *rootKeyMockStore) *expectedResult { @@ -978,12 +1732,222 @@ func randomFirewallDBEntries(t *testing.T, ctx context.Context, privPairs := randomPrivacyPairs( t, ctx, boltDB, sessionStore, acctStore, rStore, ) + actions := randomActions( + t, ctx, boltDB, sessionStore, acctStore, rStore, + ) return &expectedResult{ kvEntries: kvEntries.kvEntries, privPairs: privPairs.privPairs, - actions: []*Action{}, + actions: actions.actions, + } +} + +// addAction is a helper function that adds an action to the boltDB from a +// passed AddActionReq. The function returns the added action, but with all the +// fields set that we expect the migrated action to have, i.e. with the full +// MacaroonRootKeyID set, and with the SessionID and AccountID set to the values +// they are expected to be set to after the boltDB action has been migrated to +// SQL. +func addAction(t *testing.T, ctx context.Context, boltDB *BoltDB, + actionReq *AddActionReq) *Action { + + // We add one second to the clock prior to adding the action, just to + // ensure that the action timestamp is always after the creation time + // of a session or account that it might be linked to. + boltDB.clock = clock.NewTestClock(boltDB.clock.Now().Add(time.Second)) + + aLocator, err := boltDB.AddAction(ctx, actionReq) + require.NoError(t, err) + + locator, ok := aLocator.(*kvdbActionLocator) + require.True(t, ok) + + // Fetch the action that was just added, so that we can return it. 
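+	// We read it back through the raw kvdb buckets using the locator
+	// returned by AddAction, so that the returned Action contains exactly
+	// what the boltDB stored before we override the ID fields below.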
+	var action *Action
+	err = boltDB.View(func(tx *bbolt.Tx) error {
+		mainActionsBucket, err := getBucket(tx, actionsBucketKey)
+		require.NoError(t, err)
+
+		actionsBucket := mainActionsBucket.Bucket(actionsKey)
+		require.NotNil(t, actionsBucket)
+
+		action, err = getAction(actionsBucket, locator)
+		require.NoError(t, err)
+
+		return nil
+	})
+	require.NoError(t, err)
+
+	// Since the values for the MacaroonRootKeyID, SessionID and AccountID
+	// differ between boltDB actions and SQL actions, we set them here to
+	// what we expect them to be after the migration.
+	action.SessionID = actionReq.SessionID
+	action.AccountID = actionReq.AccountID
+	action.MacaroonRootKeyID = actionReq.MacaroonRootKeyID
+
+	return action
+}
+
+// testSession is a helper function that creates and returns a new admin
+// macaroon session with a 24 hour expiration.
+func testSession(t *testing.T, ctx context.Context,
+	sessStore session.Store) *session.Session {
+
+	return testSessionWithExpiry(
+		t, ctx, sessStore, time.Now().Add(time.Hour*24),
+	)
+}
+
+// testSessionWithExpiry is a helper function that creates and returns a new
+// admin macaroon session with the specified expiry time.
+func testSessionWithExpiry(t *testing.T, ctx context.Context,
+	sessStore session.Store, expiry time.Time) *session.Session {
+
+	sess, err := sessStore.NewSession(
+		ctx, testSessionName, session.TypeMacaroonAdmin, expiry,
+		testServerAddress,
+	)
+	require.NoError(t, err)
+
+	return sess
+}
+
+// testAccount is a helper function that creates and returns a new account
+// with a 24 hour expiration. The returned int64 is the SQL ID of the account.
+func testAccount(t *testing.T, ctx context.Context,
+	acctStore accounts.Store) (*accounts.OffChainBalanceAccount, int64) {
+
+	return testAccountWithExpiry(
+		t, ctx, acctStore, time.Now().Add(time.Hour*24),
+	)
+}
+
+// testAccountWithExpiry is a helper function that creates and returns a new
+// account with the specified expiry time. The returned int64 is the SQL ID of
+// the account.
+func testAccountWithExpiry(t *testing.T, ctx context.Context,
+	acctStore accounts.Store,
+	expiry time.Time) (*accounts.OffChainBalanceAccount, int64) {
+
+	acct, err := acctStore.NewAccount(ctx, 1234, expiry, "")
+	require.NoError(t, err)
+
+	acctSqlStore, ok := acctStore.(*accounts.SQLStore)
+	require.True(t, ok)
+
+	aliasInt, err := acct.ID.ToInt64()
+	require.NoError(t, err)
+
+	acctSqlID, err := acctSqlStore.GetAccountIDByAlias(ctx, aliasInt)
+	require.NoError(t, err)
+
+	return acct, acctSqlID
+}
+
+// testSessionWithAccount is a helper function that creates and returns a new
+// account-type macaroon session with a 1 hour expiry that is linked to a
+// newly created account with a 24 hour expiration. The returned int64 is the
+// SQL ID of the account.
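+// The session is created with an account caveat in its macaroon recipe and
+// with the session.WithAccount option, which is how these tests model an
+// account-linked session.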
+func testSessionWithAccount(t *testing.T, ctx context.Context, + sessStore session.Store, acctStore accounts.Store) (*session.Session, + *accounts.OffChainBalanceAccount, int64) { + + acct, err := acctStore.NewAccount( + ctx, 1234, time.Now().Add(time.Hour*24), "", + ) + require.NoError(t, err) + require.False(t, acct.HasExpired()) + + accountCaveat := checkers.Condition( + macaroons.CondLndCustom, + fmt.Sprintf("%s %x", + accounts.CondAccount, + acct.ID[:], + ), + ) + + sessCaveats := []macaroon.Caveat{ + { + Id: []byte(accountCaveat), + }, } + + sess, err := sessStore.NewSession( + ctx, testSessionName, session.TypeMacaroonAccount, + time.Now().Add(time.Hour), testServerAddress, + session.WithAccount(acct.ID), + session.WithMacaroonRecipe(sessCaveats, nil), + ) + require.NoError(t, err) + + acctSqlStore, ok := acctStore.(*accounts.SQLStore) + require.True(t, ok) + + aliasInt, err := acct.ID.ToInt64() + require.NoError(t, err) + + acctSqlID, err := acctSqlStore.GetAccountIDByAlias(ctx, aliasInt) + require.NoError(t, err) + + return sess, acct, acctSqlID +} + +// randFilteredSession creates and returns a session that will be filtered out +// during the actions migration. The exact reason why the session will be +// filtered out is random. +func randFilteredSession(t *testing.T, ctx context.Context, + sessStore session.Store) *session.Session { + + if rand.Intn(2) == 0 { + // Expired session. + return testSessionWithExpiry( + t, ctx, sessStore, time.Now().Add(-time.Hour), + ) + } else { + // Revoked session. + sess := testSession(t, ctx, sessStore) + + err := sessStore.ShiftState(ctx, sess.ID, session.StateCreated) + require.NoError(t, err) + err = sessStore.ShiftState(ctx, sess.ID, session.StateRevoked) + require.NoError(t, err) + + return sess + } +} + +// randAcctFilteringReq randomly modifies the passed AddActionReq to ensure that +// any account that collides with the action's MacaroonIdentifier will be +// filtered out during the migration. The AddActionReq is also modified to +// remove any previously set AccountID, as the action should not be linked to +// any account after the modification. +// The function returns the modified AddActionReq. +func randAcctFilteringReq(currentReq AddActionReq) AddActionReq { + newReq := currentReq + + switch rand.Intn(8) { + case 0: + newReq.ActorName = randomString(rand.Intn(10) + 1) + case 1: + newReq.RPCMethod = "/lnrpc.Lightning/AddInvoice" + case 2: + newReq.RPCMethod = "/lnrpc.Lightning/SendPayment" + case 3: + newReq.RPCMethod = "/lnrpc.Lightning/SendPaymentSync" + case 4: + newReq.RPCMethod = "/routerrpc.Router/SendPaymentV2" + case 5: + newReq.RPCMethod = "/lnrpc.Lightning/SendToRoute" + case 6: + newReq.RPCMethod = "/lnrpc.Lightning/SendToRouteSync" + case 7: + newReq.RPCMethod = "/routerrpc.Router/SendToRouteV2" + } + + newReq.AccountID = fn.None[accounts.AccountID]() + + return newReq } // randomString generates a random string of the passed length n. @@ -1005,3 +1969,45 @@ func randomBytes(n int) []byte { } return b } + +// RandomJSON generates a JSON string with n random key/value pairs. +// Keys are random strings like "key1", "key2"... +// Values are random ints, floats, or strings. 
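+// Passing n == 0 yields an empty JSON object.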
+func randomJSON(n int) (string, error) { + obj := make(map[string]any, n) + for i := 0; i < n; i++ { + key := fmt.Sprintf("key%d", i+1) + + // Randomly choose a type for the value + switch rand.Intn(3) { + case 0: + // random int + obj[key] = rand.Intn(1000) + case 1: + // random float + obj[key] = rand.Float64() * 100 + case 2: + // random string + obj[key] = fmt.Sprintf("val%d", rand.Intn(10000)) + } + } + + bytes, err := json.MarshalIndent(obj, "", " ") + if err != nil { + return "", err + } + return string(bytes), nil +} + +// randomRPCMethod mocks a random RPC method string with 1 to 5 segments, where +// each segment is a random string of 1 to 10 characters, and where a dot +// separates segments. +func randomRPCMethod() string { + method := randomString(rand.Intn(10) + 1) + segments := rand.Intn(5) + for i := 0; i < segments; i++ { + method += "." + randomString(rand.Intn(10)+1) + } + + return method +}