From 38c00a1efb2b9fea6bb7543eae8730ba5b9304b9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Stefan=20Negovanovi=C4=87?= Date: Thu, 4 Dec 2025 15:33:06 +0100 Subject: [PATCH 01/73] remove unused var --- bridgesync/processor_test.go | 3 --- 1 file changed, 3 deletions(-) diff --git a/bridgesync/processor_test.go b/bridgesync/processor_test.go index 56877a47c..2d5766522 100644 --- a/bridgesync/processor_test.go +++ b/bridgesync/processor_test.go @@ -2358,9 +2358,6 @@ func TestGetClaimsByGlobalIndex_Compact(t *testing.T) { oldProof := types.Proof{} oldProof[0] = common.HexToHash("0x01") - newProof := types.Proof{} - newProof[0] = common.HexToHash("0x02") - testCases := []struct { name string globalIndex *big.Int From e9834afb8304d3d63880e7f9a349d07ee1ac5937 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Stefan=20Negovanovi=C4=87?= Date: Sat, 6 Dec 2025 07:55:27 +0100 Subject: [PATCH 02/73] backward let event indexing scaffolding --- bridgesync/downloader.go | 25 +++++++++++++++++++++++++ bridgesync/processor.go | 14 +++++++++++++- 2 files changed, 38 insertions(+), 1 deletion(-) diff --git a/bridgesync/downloader.go b/bridgesync/downloader.go index 36e644023..708f78fb1 100644 --- a/bridgesync/downloader.go +++ b/bridgesync/downloader.go @@ -52,6 +52,7 @@ var ( setClaimEventSignature = crypto.Keccak256Hash([]byte( "SetClaim(bytes32)", )) + backwardLETEventSignature = crypto.Keccak256Hash([]byte("BackwardLET(uint256,bytes32,uint256,bytes32)")) claimAssetEtrogMethodID = common.Hex2Bytes("ccaa2d11") claimMessageEtrogMethodID = common.Hex2Bytes("f5efcd79") @@ -110,6 +111,10 @@ func buildAppender( appender[removeLegacySovereignTokenEventSignature] = buildRemoveLegacyTokenHandler(bridgeDeployment.agglayerBridgeL2) appender[unsetClaimEventSignature] = buildUnsetClaimEventHandler(bridgeDeployment.agglayerBridgeL2) appender[setClaimEventSignature] = buildSetClaimEventHandler(bridgeDeployment.agglayerBridgeL2) + appender[backwardLETEventSignature] = 
buildBackwardLETEventHandler(bridgeDeployment.agglayerBridgeL2) + + default: + return nil, fmt.Errorf("unsupported bridge deployment kind: %d", bridgeDeployment.kind) } return appender, nil @@ -419,6 +424,26 @@ func buildSetClaimEventHandler(contract *agglayerbridgel2.Agglayerbridgel2) func } } +// buildBackwardLETEventHandler creates a handler for the BackwardLET event log +func buildBackwardLETEventHandler(contract *agglayerbridgel2.Agglayerbridgel2) func(*sync.EVMBlock, types.Log) error { + return func(b *sync.EVMBlock, l types.Log) error { + event, err := contract.ParseBackwardLET(l) + if err != nil { + return fmt.Errorf("error parsing BackwardLET event log %+v: %w", l, err) + } + + b.Events = append(b.Events, Event{BackwardLET: &BackwardLET{ + BlockNum: b.Num, + BlockPos: uint64(l.Index), + PreviousDepositCount: event.PreviousDepositCount, + PreviousRoot: event.PreviousRoot, + NewDepositCount: event.NewDepositCount, + NewRoot: event.NewRoot, + }}) + return nil + } +} + type Call struct { From common.Address `json:"from"` To common.Address `json:"to"` diff --git a/bridgesync/processor.go b/bridgesync/processor.go index fc7c7f61b..b81f74882 100644 --- a/bridgesync/processor.go +++ b/bridgesync/processor.go @@ -484,7 +484,7 @@ func (u *UnsetClaim) String() string { } // SetClaim representation of a SetClaim event, -// that is emitted by the bridge contract when a claim is set. +// that is emitted by the L2 bridge contract when a claim is set. type SetClaim struct { BlockNum uint64 `meddler:"block_num"` BlockPos uint64 `meddler:"block_pos"` @@ -504,6 +504,17 @@ func (s *SetClaim) String() string { globalIndexStr, s.CreatedAt) } +// BackwardLET representation of a BackwardLET event, +// that is emitted by the L2 bridge contract when a LET is rolled back. 
+type BackwardLET struct { + BlockNum uint64 `meddler:"block_num"` + BlockPos uint64 `meddler:"block_pos"` + PreviousDepositCount *big.Int `meddler:"previous_deposit_count,bigint"` + PreviousRoot common.Hash `meddler:"previous_root,hash"` + NewDepositCount *big.Int `meddler:"new_deposit_count,bigint"` + NewRoot common.Hash `meddler:"new_root,hash"` +} + // Event combination of bridge, claim, token mapping and legacy token migration events type Event struct { Bridge *Bridge @@ -513,6 +524,7 @@ type Event struct { RemoveLegacyToken *RemoveLegacyToken UnsetClaim *UnsetClaim SetClaim *SetClaim + BackwardLET *BackwardLET } func (e Event) String() string { From 003112a006d8dfd36f078d8090154f07d93d2ed7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Stefan=20Negovanovi=C4=87?= Date: Mon, 8 Dec 2025 10:24:29 +0100 Subject: [PATCH 03/73] introduce new migration and refactor migrations resolution to be generic --- bridgesync/migrations/bridgesync0011.sql | 14 +++ bridgesync/migrations/migrations.go | 118 ++++++++++------------- bridgesync/migrations/migrations_test.go | 45 +-------- bridgesync/processor.go | 10 ++ 4 files changed, 80 insertions(+), 107 deletions(-) create mode 100644 bridgesync/migrations/bridgesync0011.sql diff --git a/bridgesync/migrations/bridgesync0011.sql b/bridgesync/migrations/bridgesync0011.sql new file mode 100644 index 000000000..a1f5cc817 --- /dev/null +++ b/bridgesync/migrations/bridgesync0011.sql @@ -0,0 +1,14 @@ +-- +migrate Down +DROP TABLE IF EXISTS backward_let; + +-- +migrate Up +CREATE TABLE + backward_let ( + block_num INTEGER NOT NULL REFERENCES block (num) ON DELETE CASCADE, + block_pos INTEGER NOT NULL, + previous_deposit_count TEXT NOT NULL, + previous_root VARCHAR NOT NULL, + new_deposit_count TEXT NOT NULL, + new_root VARCHAR NOT NULL, + PRIMARY KEY (block_num, block_pos) + ); \ No newline at end of file diff --git a/bridgesync/migrations/migrations.go b/bridgesync/migrations/migrations.go index 207a35a11..d718e93e6 100644 --- 
a/bridgesync/migrations/migrations.go +++ b/bridgesync/migrations/migrations.go @@ -1,86 +1,70 @@ package migrations import ( - _ "embed" + "embed" + "fmt" + "sort" + "strings" "github.com/agglayer/aggkit/db" "github.com/agglayer/aggkit/db/types" - treeMigrations "github.com/agglayer/aggkit/tree/migrations" + treemigrations "github.com/agglayer/aggkit/tree/migrations" ) -//go:embed bridgesync0001.sql -var mig0001 string +var ( + //go:embed *.sql + migrationFS embed.FS + migrations []types.Migration +) -//go:embed bridgesync0002.sql -var mig0002 string +func init() { + entries, err := migrationFS.ReadDir(".") + if err != nil { + panic(fmt.Errorf("failed to read embedded migrations: %w", err)) + } -//go:embed bridgesync0003.sql -var mig0003 string + for _, e := range entries { + name := e.Name() // e.g. "bridgesync0004.sql" -//go:embed bridgesync0004.sql -var mig0004 string + sqlBytes, err := migrationFS.ReadFile(name) + if err != nil { + panic(err) + } -//go:embed bridgesync0005.sql -var mig0005 string + id := strings.TrimSuffix(name, ".sql") // "bridgesync0004" -//go:embed bridgesync0006.sql -var mig0006 string + migrations = append(migrations, types.Migration{ + ID: id, + SQL: string(sqlBytes), + }) + } -//go:embed bridgesync0007.sql -var mig0007 string + // Ensure deterministic canonical order + sort.Slice(migrations, func(i, j int) bool { + return migrations[i].ID < migrations[j].ID + }) +} -//go:embed bridgesync0008.sql -var mig0008 string +func RunMigrations(dbPath string) error { + // Pre-calculate total length + total := len(migrations) + len(treemigrations.Migrations) -//go:embed bridgesync0009.sql -var mig0009 string + combined := make([]types.Migration, 0, total) + // Copy migrations + combined = append(combined, migrations...) + combined = append(combined, treemigrations.Migrations...) 
-//go:embed bridgesync0010.sql -var mig0010 string + // Pass the copy to db.RunMigrations + return db.RunMigrations(dbPath, combined) +} -func RunMigrations(dbPath string) error { - migrations := []types.Migration{ - { - ID: "bridgesync0001", - SQL: mig0001, - }, - { - ID: "bridgesync0002", - SQL: mig0002, - }, - { - ID: "bridgesync0003", - SQL: mig0003, - }, - { - ID: "bridgesync0004", - SQL: mig0004, - }, - { - ID: "bridgesync0005", - SQL: mig0005, - }, - { - ID: "bridgesync0006", - SQL: mig0006, - }, - { - ID: "bridgesync0007", - SQL: mig0007, - }, - { - ID: "bridgesync0008", - SQL: mig0008, - }, - { - ID: "bridgesync0009", - SQL: mig0009, - }, - { - ID: "bridgesync0010", - SQL: mig0010, - }, - } - migrations = append(migrations, treeMigrations.Migrations...) - return db.RunMigrations(dbPath, migrations) +// GetUpTo returns all migrations up to and including the migration with the given ID. +func GetUpTo(lastID string) []types.Migration { + idx := sort.Search(len(migrations), func(i int) bool { + return migrations[i].ID > lastID + }) + + out := make([]types.Migration, idx) + copy(out, migrations[:idx]) + return out } diff --git a/bridgesync/migrations/migrations_test.go b/bridgesync/migrations/migrations_test.go index e8217c2bc..a4728a4d7 100644 --- a/bridgesync/migrations/migrations_test.go +++ b/bridgesync/migrations/migrations_test.go @@ -7,7 +7,6 @@ import ( "testing" "github.com/agglayer/aggkit/db" - "github.com/agglayer/aggkit/db/types" "github.com/agglayer/aggkit/log" "github.com/ethereum/go-ethereum/common" migrate "github.com/rubenv/sql-migrate" @@ -266,23 +265,10 @@ func TestMigration0004(t *testing.T) { require.NoError(t, err) defer database.Close() - // Define migrations up to 0003 - migrations := []types.Migration{ - { - ID: "bridgesync0001", - SQL: mig0001, - }, - { - ID: "bridgesync0002", - SQL: mig0002, - }, - { - ID: "bridgesync0003", - SQL: mig0003, - }, - } + // Define migrations up to bridgesync0003 + migrations := GetUpTo("bridgesync0003") - 
// Run migrations up to 0003 (3 migrations) + // Run migrations up to bridgesync0003 (3 migrations) err = db.RunMigrationsDBExtended(log.GetDefaultLogger(), database, migrations, migrate.Up, 3) require.NoError(t, err) @@ -458,29 +444,8 @@ func TestMigration0006(t *testing.T) { require.NoError(t, err) defer database.Close() - // Define migrations up to 0005 - migrations := []types.Migration{ - { - ID: "bridgesync0001", - SQL: mig0001, - }, - { - ID: "bridgesync0002", - SQL: mig0002, - }, - { - ID: "bridgesync0003", - SQL: mig0003, - }, - { - ID: "bridgesync0004", - SQL: mig0004, - }, - { - ID: "bridgesync0005", - SQL: mig0005, - }, - } + // Define migrations up to bridgesync0005 + migrations := GetUpTo("bridgesync0005") // Run migrations up to 0005 (5 migrations) err = db.RunMigrationsDBExtended(log.GetDefaultLogger(), database, migrations, migrate.Up, 5) diff --git a/bridgesync/processor.go b/bridgesync/processor.go index b81f74882..400c4e995 100644 --- a/bridgesync/processor.go +++ b/bridgesync/processor.go @@ -51,6 +51,9 @@ const ( // setClaimTableName is the name of the table that stores set claim events setClaimTableName = "set_claim" + // backwardLETTableName is the name of the table that stores backward local exit tree events + backwardLETTableName = "backward_let" + // nilStr holds nil string nilStr = "nil" ) @@ -1344,6 +1347,13 @@ func (p *processor) ProcessBlock(ctx context.Context, block sync.Block) error { return err } } + + if event.BackwardLET != nil { + if err = meddler.Insert(tx, backwardLETTableName, event.BackwardLET); err != nil { + p.log.Errorf("failed to insert backward LET event at block %d: %v", block.Num, err) + return err + } + } } if err := tx.Commit(); err != nil { From c3dbef4bb45a85683924561ae18520edb060d913 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Stefan=20Negovanovi=C4=87?= Date: Mon, 8 Dec 2025 14:20:28 +0100 Subject: [PATCH 04/73] TestBuildAppender increase coverage --- bridgesync/downloader_test.go | 26 ++++++++++++++++++++++++++ 1 
file changed, 26 insertions(+) diff --git a/bridgesync/downloader_test.go b/bridgesync/downloader_test.go index b622bda2d..b9bba2a30 100644 --- a/bridgesync/downloader_test.go +++ b/bridgesync/downloader_test.go @@ -346,6 +346,32 @@ func TestBuildAppender(t *testing.T) { return l, nil }, }, + { + name: "backwardLETSignature appender", + eventSignature: backwardLETEventSignature, + deploymentKind: SovereignChain, + logBuilder: func() (types.Log, error) { + event, err := bridgeL2Abi.EventByID(backwardLETEventSignature) + if err != nil { + return types.Log{}, err + } + + previousDepositCount := big.NewInt(10) + previousRoot := common.HexToHash("0xdeadbeef") + newDepositCount := big.NewInt(8) + newRoot := common.HexToHash("0x5ca1e") + data, err := event.Inputs.Pack(previousDepositCount, previousRoot, newDepositCount, newRoot) + if err != nil { + return types.Log{}, err + } + + l := types.Log{ + Topics: []common.Hash{backwardLETEventSignature}, + Data: data, + } + return l, nil + }, + }, } for _, tt := range tests { From 928cf5f4401b29c3c182b29c927745ccaabfa825 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Stefan=20Negovanovi=C4=87?= Date: Mon, 8 Dec 2025 14:53:09 +0100 Subject: [PATCH 05/73] delete bridges on backward let --- bridgesync/processor.go | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/bridgesync/processor.go b/bridgesync/processor.go index 400c4e995..75b70d774 100644 --- a/bridgesync/processor.go +++ b/bridgesync/processor.go @@ -1349,6 +1349,15 @@ func (p *processor) ProcessBlock(ctx context.Context, block sync.Block) error { } if event.BackwardLET != nil { + // TODO: update the exit tree accordingly + deleteBridges := fmt.Sprintf("DELETE from %s WHERE deposit_count >= $1", bridgeTableName) + _, err := tx.Exec(deleteBridges, event.BackwardLET.NewDepositCount) + if err != nil { + p.log.Errorf("failed to remove bridges whose deposit count is greater than or equal to %d", + event.BackwardLET.NewDepositCount) + return err + } + if err = 
meddler.Insert(tx, backwardLETTableName, event.BackwardLET); err != nil { p.log.Errorf("failed to insert backward LET event at block %d: %v", block.Num, err) return err From c72e1028df32b97c25d2a479b2741363f66f72f9 Mon Sep 17 00:00:00 2001 From: Goran Rojovic <100121253+goran-ethernal@users.noreply.github.com> Date: Mon, 8 Dec 2025 15:05:00 +0100 Subject: [PATCH 06/73] feat: backwards on merkle tree (#1378) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## 🔄 Changes Summary This PR adds a function to rewind merkle tree to a previous root index. (This will be used for backwarding of `LocalExitTree`). ## ⚠️ Breaking Changes NA ## 📋 Config Updates NA ## ✅ Testing - 🤖 **Automatic**: `aggkit` CI --- bridgesync/processor.go | 2 +- db/mocks/mock_d_ber.go | 70 ++++++++++++++ db/mocks/mock_querier.go | 70 ++++++++++++++ db/mocks/mock_sql_txer.go | 70 ++++++++++++++ db/mocks/mock_txer.go | 70 ++++++++++++++ db/types/interface.go | 1 + tree/tree.go | 25 +++-- tree/tree_test.go | 108 ++++++++++++++++++++++ tree/types/interfaces.go | 1 + tree/types/mocks/mock_full_treer.go | 48 ++++++++++ tree/types/mocks/mock_reorganize_treer.go | 48 ++++++++++ 11 files changed, 503 insertions(+), 10 deletions(-) diff --git a/bridgesync/processor.go b/bridgesync/processor.go index 75b70d774..462237fd2 100644 --- a/bridgesync/processor.go +++ b/bridgesync/processor.go @@ -595,7 +595,7 @@ func (b BridgeSyncRuntimeData) IsCompatible(storage BridgeSyncRuntimeData) error type processor struct { syncerID string db *sql.DB - exitTree *tree.AppendOnlyTree + exitTree types.FullTreer log *log.Logger mu mutex.RWMutex halted bool diff --git a/db/mocks/mock_d_ber.go b/db/mocks/mock_d_ber.go index c60585aae..76f54538d 100644 --- a/db/mocks/mock_d_ber.go +++ b/db/mocks/mock_d_ber.go @@ -150,6 +150,76 @@ func (_c *DBer_Exec_Call) RunAndReturn(run func(string, ...interface{}) (sql.Res return _c } +// ExecContext provides a mock function with given fields: ctx, query, 
args +func (_m *DBer) ExecContext(ctx context.Context, query string, args ...interface{}) (sql.Result, error) { + var _ca []interface{} + _ca = append(_ca, ctx, query) + _ca = append(_ca, args...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for ExecContext") + } + + var r0 sql.Result + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string, ...interface{}) (sql.Result, error)); ok { + return rf(ctx, query, args...) + } + if rf, ok := ret.Get(0).(func(context.Context, string, ...interface{}) sql.Result); ok { + r0 = rf(ctx, query, args...) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(sql.Result) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, string, ...interface{}) error); ok { + r1 = rf(ctx, query, args...) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// DBer_ExecContext_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ExecContext' +type DBer_ExecContext_Call struct { + *mock.Call +} + +// ExecContext is a helper method to define mock.On call +// - ctx context.Context +// - query string +// - args ...interface{} +func (_e *DBer_Expecter) ExecContext(ctx interface{}, query interface{}, args ...interface{}) *DBer_ExecContext_Call { + return &DBer_ExecContext_Call{Call: _e.mock.On("ExecContext", + append([]interface{}{ctx, query}, args...)...)} +} + +func (_c *DBer_ExecContext_Call) Run(run func(ctx context.Context, query string, args ...interface{})) *DBer_ExecContext_Call { + _c.Call.Run(func(args mock.Arguments) { + variadicArgs := make([]interface{}, len(args)-2) + for i, a := range args[2:] { + if a != nil { + variadicArgs[i] = a.(interface{}) + } + } + run(args[0].(context.Context), args[1].(string), variadicArgs...) 
+ }) + return _c +} + +func (_c *DBer_ExecContext_Call) Return(_a0 sql.Result, _a1 error) *DBer_ExecContext_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *DBer_ExecContext_Call) RunAndReturn(run func(context.Context, string, ...interface{}) (sql.Result, error)) *DBer_ExecContext_Call { + _c.Call.Return(run) + return _c +} + // Query provides a mock function with given fields: query, args func (_m *DBer) Query(query string, args ...interface{}) (*sql.Rows, error) { var _ca []interface{} diff --git a/db/mocks/mock_querier.go b/db/mocks/mock_querier.go index 719ef6a1d..5bf4dc754 100644 --- a/db/mocks/mock_querier.go +++ b/db/mocks/mock_querier.go @@ -91,6 +91,76 @@ func (_c *Querier_Exec_Call) RunAndReturn(run func(string, ...interface{}) (sql. return _c } +// ExecContext provides a mock function with given fields: ctx, query, args +func (_m *Querier) ExecContext(ctx context.Context, query string, args ...interface{}) (sql.Result, error) { + var _ca []interface{} + _ca = append(_ca, ctx, query) + _ca = append(_ca, args...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for ExecContext") + } + + var r0 sql.Result + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string, ...interface{}) (sql.Result, error)); ok { + return rf(ctx, query, args...) + } + if rf, ok := ret.Get(0).(func(context.Context, string, ...interface{}) sql.Result); ok { + r0 = rf(ctx, query, args...) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(sql.Result) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, string, ...interface{}) error); ok { + r1 = rf(ctx, query, args...) 
+ } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Querier_ExecContext_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ExecContext' +type Querier_ExecContext_Call struct { + *mock.Call +} + +// ExecContext is a helper method to define mock.On call +// - ctx context.Context +// - query string +// - args ...interface{} +func (_e *Querier_Expecter) ExecContext(ctx interface{}, query interface{}, args ...interface{}) *Querier_ExecContext_Call { + return &Querier_ExecContext_Call{Call: _e.mock.On("ExecContext", + append([]interface{}{ctx, query}, args...)...)} +} + +func (_c *Querier_ExecContext_Call) Run(run func(ctx context.Context, query string, args ...interface{})) *Querier_ExecContext_Call { + _c.Call.Run(func(args mock.Arguments) { + variadicArgs := make([]interface{}, len(args)-2) + for i, a := range args[2:] { + if a != nil { + variadicArgs[i] = a.(interface{}) + } + } + run(args[0].(context.Context), args[1].(string), variadicArgs...) + }) + return _c +} + +func (_c *Querier_ExecContext_Call) Return(_a0 sql.Result, _a1 error) *Querier_ExecContext_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *Querier_ExecContext_Call) RunAndReturn(run func(context.Context, string, ...interface{}) (sql.Result, error)) *Querier_ExecContext_Call { + _c.Call.Return(run) + return _c +} + // Query provides a mock function with given fields: query, args func (_m *Querier) Query(query string, args ...interface{}) (*sql.Rows, error) { var _ca []interface{} diff --git a/db/mocks/mock_sql_txer.go b/db/mocks/mock_sql_txer.go index 4da1b269f..0730d62fb 100644 --- a/db/mocks/mock_sql_txer.go +++ b/db/mocks/mock_sql_txer.go @@ -136,6 +136,76 @@ func (_c *SQLTxer_Exec_Call) RunAndReturn(run func(string, ...interface{}) (sql. 
return _c } +// ExecContext provides a mock function with given fields: ctx, query, args +func (_m *SQLTxer) ExecContext(ctx context.Context, query string, args ...interface{}) (sql.Result, error) { + var _ca []interface{} + _ca = append(_ca, ctx, query) + _ca = append(_ca, args...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for ExecContext") + } + + var r0 sql.Result + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string, ...interface{}) (sql.Result, error)); ok { + return rf(ctx, query, args...) + } + if rf, ok := ret.Get(0).(func(context.Context, string, ...interface{}) sql.Result); ok { + r0 = rf(ctx, query, args...) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(sql.Result) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, string, ...interface{}) error); ok { + r1 = rf(ctx, query, args...) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// SQLTxer_ExecContext_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ExecContext' +type SQLTxer_ExecContext_Call struct { + *mock.Call +} + +// ExecContext is a helper method to define mock.On call +// - ctx context.Context +// - query string +// - args ...interface{} +func (_e *SQLTxer_Expecter) ExecContext(ctx interface{}, query interface{}, args ...interface{}) *SQLTxer_ExecContext_Call { + return &SQLTxer_ExecContext_Call{Call: _e.mock.On("ExecContext", + append([]interface{}{ctx, query}, args...)...)} +} + +func (_c *SQLTxer_ExecContext_Call) Run(run func(ctx context.Context, query string, args ...interface{})) *SQLTxer_ExecContext_Call { + _c.Call.Run(func(args mock.Arguments) { + variadicArgs := make([]interface{}, len(args)-2) + for i, a := range args[2:] { + if a != nil { + variadicArgs[i] = a.(interface{}) + } + } + run(args[0].(context.Context), args[1].(string), variadicArgs...) 
+ }) + return _c +} + +func (_c *SQLTxer_ExecContext_Call) Return(_a0 sql.Result, _a1 error) *SQLTxer_ExecContext_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *SQLTxer_ExecContext_Call) RunAndReturn(run func(context.Context, string, ...interface{}) (sql.Result, error)) *SQLTxer_ExecContext_Call { + _c.Call.Return(run) + return _c +} + // Query provides a mock function with given fields: query, args func (_m *SQLTxer) Query(query string, args ...interface{}) (*sql.Rows, error) { var _ca []interface{} diff --git a/db/mocks/mock_txer.go b/db/mocks/mock_txer.go index 510a56462..75e613ae9 100644 --- a/db/mocks/mock_txer.go +++ b/db/mocks/mock_txer.go @@ -202,6 +202,76 @@ func (_c *Txer_Exec_Call) RunAndReturn(run func(string, ...interface{}) (sql.Res return _c } +// ExecContext provides a mock function with given fields: ctx, query, args +func (_m *Txer) ExecContext(ctx context.Context, query string, args ...interface{}) (sql.Result, error) { + var _ca []interface{} + _ca = append(_ca, ctx, query) + _ca = append(_ca, args...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for ExecContext") + } + + var r0 sql.Result + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string, ...interface{}) (sql.Result, error)); ok { + return rf(ctx, query, args...) + } + if rf, ok := ret.Get(0).(func(context.Context, string, ...interface{}) sql.Result); ok { + r0 = rf(ctx, query, args...) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(sql.Result) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, string, ...interface{}) error); ok { + r1 = rf(ctx, query, args...) 
+ } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Txer_ExecContext_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ExecContext' +type Txer_ExecContext_Call struct { + *mock.Call +} + +// ExecContext is a helper method to define mock.On call +// - ctx context.Context +// - query string +// - args ...interface{} +func (_e *Txer_Expecter) ExecContext(ctx interface{}, query interface{}, args ...interface{}) *Txer_ExecContext_Call { + return &Txer_ExecContext_Call{Call: _e.mock.On("ExecContext", + append([]interface{}{ctx, query}, args...)...)} +} + +func (_c *Txer_ExecContext_Call) Run(run func(ctx context.Context, query string, args ...interface{})) *Txer_ExecContext_Call { + _c.Call.Run(func(args mock.Arguments) { + variadicArgs := make([]interface{}, len(args)-2) + for i, a := range args[2:] { + if a != nil { + variadicArgs[i] = a.(interface{}) + } + } + run(args[0].(context.Context), args[1].(string), variadicArgs...) + }) + return _c +} + +func (_c *Txer_ExecContext_Call) Return(_a0 sql.Result, _a1 error) *Txer_ExecContext_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *Txer_ExecContext_Call) RunAndReturn(run func(context.Context, string, ...interface{}) (sql.Result, error)) *Txer_ExecContext_Call { + _c.Call.Return(run) + return _c +} + // Query provides a mock function with given fields: query, args func (_m *Txer) Query(query string, args ...interface{}) (*sql.Rows, error) { var _ca []interface{} diff --git a/db/types/interface.go b/db/types/interface.go index ae2b4aebe..ff54d3ac7 100644 --- a/db/types/interface.go +++ b/db/types/interface.go @@ -11,6 +11,7 @@ import ( // Implementations of this interface can be used to generalize database access logic. 
type Querier interface { Exec(query string, args ...interface{}) (sql.Result, error) + ExecContext(ctx context.Context, query string, args ...interface{}) (sql.Result, error) Query(query string, args ...interface{}) (*sql.Rows, error) QueryRow(query string, args ...interface{}) *sql.Row QueryContext(ctx context.Context, query string, args ...interface{}) (*sql.Rows, error) diff --git a/tree/tree.go b/tree/tree.go index 019d591c4..e685c3b0c 100644 --- a/tree/tree.go +++ b/tree/tree.go @@ -133,14 +133,11 @@ func (t *Tree) getRHTNode(tx dbtypes.Querier, nodeHash common.Hash) (*types.Tree } func (t *Tree) storeNodes(tx dbtypes.Txer, nodes []types.TreeNode) error { - for i := 0; i < len(nodes); i++ { - if err := meddler.Insert(tx, t.rhtTable, &nodes[i]); err != nil { - if sqliteErr, ok := db.SQLiteErr(err); ok { - if sqliteErr.ExtendedCode == db.UniqueConstrain { - // ignore repeated entries. This is likely to happen due to not - // cleaning RHT when reorg - continue - } + for _, node := range nodes { + if err := meddler.Insert(tx, t.rhtTable, &node); err != nil { + if sqliteErr, ok := db.SQLiteErr(err); ok && sqliteErr.ExtendedCode == db.UniqueConstrain { + // ignore repeated entries + continue } return err } @@ -246,12 +243,22 @@ func (t *Tree) Reorg(tx dbtypes.Txer, firstReorgedBlock uint64) error { return err } +// BackwardToIndex deletes all the roots with index higher than targetIndex +func (t *Tree) BackwardToIndex(ctx context.Context, tx dbtypes.Txer, targetIndex uint32) error { + _, err := tx.ExecContext( + ctx, + fmt.Sprintf(`DELETE FROM %s WHERE position > $1`, t.rootTable), + targetIndex, + ) + return err +} + // CalculateRoot calculates the Merkle Root based on the leaf and proof of inclusion func CalculateRoot(leafHash common.Hash, proof [types.DefaultHeight]common.Hash, index uint32) common.Hash { node := leafHash // Compute the Merkle root - for height := uint8(0); height < types.DefaultHeight; height++ { + for height := range types.DefaultHeight { if 
(index>>height)&1 == 1 { node = crypto.Keccak256Hash(proof[height].Bytes(), node.Bytes()) } else { diff --git a/tree/tree_test.go b/tree/tree_test.go index 2021214b8..771b1eb55 100644 --- a/tree/tree_test.go +++ b/tree/tree_test.go @@ -306,6 +306,114 @@ func TestVerifyProof(t *testing.T) { } } +func TestTree_BackwardToIndex(t *testing.T) { + t.Parallel() + ctx := context.Background() + + t.Run("deletes roots with index higher than targetIndex", func(t *testing.T) { + t.Parallel() + + treeDB := createTreeDBForTest(t) + tree := NewAppendOnlyTree(treeDB, "") + + // Add 8 leaves (roots with indices 0..7) + putTestLeaves(t, tree, treeDB, 8, 0) + + // Confirm all roots exist + for i := range 8 { + root, err := tree.GetRootByIndex(ctx, uint32(i)) + require.NoError(t, err) + require.Equal(t, uint32(i), root.Index) + } + + tx, err := db.NewTx(context.Background(), treeDB) + require.NoError(t, err) + + // Delete roots with index > 4 + require.NoError(t, tree.BackwardToIndex(ctx, tx, 4)) + require.NoError(t, tx.Commit()) + + // Roots with index 0..4 should exist + for i := 0; i <= 4; i++ { + root, err := tree.GetRootByIndex(ctx, uint32(i)) + require.NoError(t, err) + require.Equal(t, uint32(i), root.Index) + } + + // Roots with index 5..7 should not exist + for i := 5; i < 8; i++ { + _, err := tree.GetRootByIndex(ctx, uint32(i)) + require.Error(t, err) + require.ErrorIs(t, err, db.ErrNotFound) + } + + // Add more leaves to confirm tree is still functional + putTestLeaves(t, tree, treeDB, 3, 5) // adding leaves with indices 5,6,7 + + // Confirm new roots exist + for i := range 8 { + root, err := tree.GetRootByIndex(ctx, uint32(i)) + require.NoError(t, err) + require.Equal(t, uint32(i), root.Index) + } + }) + + t.Run("no roots deleted if none above targetIndex", func(t *testing.T) { + t.Parallel() + + treeDB := createTreeDBForTest(t) + tree := NewAppendOnlyTree(treeDB, "") + + putTestLeaves(t, tree, treeDB, 3, 0) + + tx, err := db.NewTx(context.Background(), treeDB) + 
require.NoError(t, err) + + require.NoError(t, tree.BackwardToIndex(ctx, tx, 10)) + require.NoError(t, tx.Commit()) + + // All roots should still exist + for i := range 3 { + root, err := tree.GetRootByIndex(ctx, uint32(i)) + require.NoError(t, err) + require.Equal(t, uint32(i), root.Index) + } + }) + + t.Run("handles empty table gracefully", func(t *testing.T) { + t.Parallel() + + treeDB := createTreeDBForTest(t) + tree := NewAppendOnlyTree(treeDB, "") + + tx, err := db.NewTx(context.Background(), treeDB) + require.NoError(t, err) + + require.NoError(t, tree.BackwardToIndex(ctx, tx, 0)) + }) + + t.Run("returns error on database failure", func(t *testing.T) { + t.Parallel() + + dbPath := path.Join(t.TempDir(), "tree_BackwardToIndex_dberr.sqlite") + require.NoError(t, migrations.RunMigrations(dbPath)) + treeDB, err := db.NewSQLiteDB(dbPath) + require.NoError(t, err) + + // Intentionally invalid table name + tree := &Tree{ + db: treeDB, + rootTable: "nonexistent_table", + } + + tx, err := db.NewTx(context.Background(), treeDB) + require.NoError(t, err) + + err = tree.BackwardToIndex(ctx, tx, 0) + require.ErrorContains(t, err, "no such table") + }) +} + func createTreeDBForTest(t *testing.T) *sql.DB { t.Helper() diff --git a/tree/types/interfaces.go b/tree/types/interfaces.go index 997393559..670b71610 100644 --- a/tree/types/interfaces.go +++ b/tree/types/interfaces.go @@ -25,6 +25,7 @@ type LeafWriter interface { type ReorganizeTreer interface { ReadTreer Reorg(tx dbtypes.Txer, firstReorgedBlock uint64) error + BackwardToIndex(ctx context.Context, tx dbtypes.Txer, targetIndex uint32) error } // FullTreer = fully-capable tree (read, write, reorg) diff --git a/tree/types/mocks/mock_full_treer.go b/tree/types/mocks/mock_full_treer.go index 3ab36cb35..91187f9ff 100644 --- a/tree/types/mocks/mock_full_treer.go +++ b/tree/types/mocks/mock_full_treer.go @@ -27,6 +27,54 @@ func (_m *FullTreer) EXPECT() *FullTreer_Expecter { return &FullTreer_Expecter{mock: &_m.Mock} } +// 
BackwardToIndex provides a mock function with given fields: ctx, tx, targetIndex +func (_m *FullTreer) BackwardToIndex(ctx context.Context, tx types.Txer, targetIndex uint32) error { + ret := _m.Called(ctx, tx, targetIndex) + + if len(ret) == 0 { + panic("no return value specified for BackwardToIndex") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, types.Txer, uint32) error); ok { + r0 = rf(ctx, tx, targetIndex) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// FullTreer_BackwardToIndex_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'BackwardToIndex' +type FullTreer_BackwardToIndex_Call struct { + *mock.Call +} + +// BackwardToIndex is a helper method to define mock.On call +// - ctx context.Context +// - tx types.Txer +// - targetIndex uint32 +func (_e *FullTreer_Expecter) BackwardToIndex(ctx interface{}, tx interface{}, targetIndex interface{}) *FullTreer_BackwardToIndex_Call { + return &FullTreer_BackwardToIndex_Call{Call: _e.mock.On("BackwardToIndex", ctx, tx, targetIndex)} +} + +func (_c *FullTreer_BackwardToIndex_Call) Run(run func(ctx context.Context, tx types.Txer, targetIndex uint32)) *FullTreer_BackwardToIndex_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(types.Txer), args[2].(uint32)) + }) + return _c +} + +func (_c *FullTreer_BackwardToIndex_Call) Return(_a0 error) *FullTreer_BackwardToIndex_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *FullTreer_BackwardToIndex_Call) RunAndReturn(run func(context.Context, types.Txer, uint32) error) *FullTreer_BackwardToIndex_Call { + _c.Call.Return(run) + return _c +} + // GetLastRoot provides a mock function with given fields: tx func (_m *FullTreer) GetLastRoot(tx types.Querier) (treetypes.Root, error) { ret := _m.Called(tx) diff --git a/tree/types/mocks/mock_reorganize_treer.go b/tree/types/mocks/mock_reorganize_treer.go index cc97ea5a4..2f8b51d12 100644 --- 
a/tree/types/mocks/mock_reorganize_treer.go +++ b/tree/types/mocks/mock_reorganize_treer.go @@ -27,6 +27,54 @@ func (_m *ReorganizeTreer) EXPECT() *ReorganizeTreer_Expecter { return &ReorganizeTreer_Expecter{mock: &_m.Mock} } +// BackwardToIndex provides a mock function with given fields: ctx, tx, targetIndex +func (_m *ReorganizeTreer) BackwardToIndex(ctx context.Context, tx types.Txer, targetIndex uint32) error { + ret := _m.Called(ctx, tx, targetIndex) + + if len(ret) == 0 { + panic("no return value specified for BackwardToIndex") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, types.Txer, uint32) error); ok { + r0 = rf(ctx, tx, targetIndex) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// ReorganizeTreer_BackwardToIndex_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'BackwardToIndex' +type ReorganizeTreer_BackwardToIndex_Call struct { + *mock.Call +} + +// BackwardToIndex is a helper method to define mock.On call +// - ctx context.Context +// - tx types.Txer +// - targetIndex uint32 +func (_e *ReorganizeTreer_Expecter) BackwardToIndex(ctx interface{}, tx interface{}, targetIndex interface{}) *ReorganizeTreer_BackwardToIndex_Call { + return &ReorganizeTreer_BackwardToIndex_Call{Call: _e.mock.On("BackwardToIndex", ctx, tx, targetIndex)} +} + +func (_c *ReorganizeTreer_BackwardToIndex_Call) Run(run func(ctx context.Context, tx types.Txer, targetIndex uint32)) *ReorganizeTreer_BackwardToIndex_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(types.Txer), args[2].(uint32)) + }) + return _c +} + +func (_c *ReorganizeTreer_BackwardToIndex_Call) Return(_a0 error) *ReorganizeTreer_BackwardToIndex_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *ReorganizeTreer_BackwardToIndex_Call) RunAndReturn(run func(context.Context, types.Txer, uint32) error) *ReorganizeTreer_BackwardToIndex_Call { + _c.Call.Return(run) + return _c +} + // GetLastRoot provides 
a mock function with given fields: tx func (_m *ReorganizeTreer) GetLastRoot(tx types.Querier) (treetypes.Root, error) { ret := _m.Called(tx) From aea7e8f0a15d62d0075b542335ab1b94ed582803 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Stefan=20Negovanovi=C4=87?= Date: Mon, 8 Dec 2025 15:36:52 +0100 Subject: [PATCH 07/73] update exit tree on backward LET --- bridgesync/processor.go | 24 +++++++++++++++++++++--- 1 file changed, 21 insertions(+), 3 deletions(-) diff --git a/bridgesync/processor.go b/bridgesync/processor.go index 462237fd2..76b5eeaf2 100644 --- a/bridgesync/processor.go +++ b/bridgesync/processor.go @@ -6,6 +6,7 @@ import ( "encoding/binary" "errors" "fmt" + "math" "math/big" "regexp" "strings" @@ -1349,9 +1350,26 @@ func (p *processor) ProcessBlock(ctx context.Context, block sync.Block) error { } if event.BackwardLET != nil { - // TODO: update the exit tree accordingly + ndc := event.BackwardLET.NewDepositCount + + // Check bounds + if !ndc.IsUint64() { + return fmt.Errorf("NewDepositCount=%s does not fit into uint64", ndc.String()) + } + + ndcU64 := ndc.Uint64() + if ndcU64 > math.MaxUint32 { + return fmt.Errorf("NewDepositCount=%d exceeds uint32 max (%d)", ndcU64, uint32(math.MaxUint32)) + } + + ndcU32 := uint32(ndcU64) + + if err := p.exitTree.BackwardToIndex(ctx, tx, ndcU32); err != nil { + p.log.Errorf("failed to backward local exit tree to %d deposit count", ndcU32) + return err + } deleteBridges := fmt.Sprintf("DELETE from %s WHERE deposit_count >= $1", bridgeTableName) - _, err := tx.Exec(deleteBridges, event.BackwardLET.NewDepositCount) + _, err := tx.Exec(deleteBridges, ndc) if err != nil { p.log.Errorf("failed to remove bridges whose deposit count is greater than or equal to %d", event.BackwardLET.NewDepositCount) @@ -1359,7 +1377,7 @@ func (p *processor) ProcessBlock(ctx context.Context, block sync.Block) error { } if err = meddler.Insert(tx, backwardLETTableName, event.BackwardLET); err != nil { - p.log.Errorf("failed to insert backward LET 
event at block %d: %v", block.Num, err) + p.log.Errorf("failed to insert backward local exit tree event at block %d: %v", block.Num, err) return err } } From 6ae4adbc4ad3022a18453d83818b282d95d10f63 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Stefan=20Negovanovi=C4=87?= Date: Tue, 9 Dec 2025 09:50:46 +0100 Subject: [PATCH 08/73] formatting --- bridgesync/processor.go | 25 ++++++++++++------------- 1 file changed, 12 insertions(+), 13 deletions(-) diff --git a/bridgesync/processor.go b/bridgesync/processor.go index 76b5eeaf2..95b8d6861 100644 --- a/bridgesync/processor.go +++ b/bridgesync/processor.go @@ -1350,26 +1350,25 @@ func (p *processor) ProcessBlock(ctx context.Context, block sync.Block) error { } if event.BackwardLET != nil { - ndc := event.BackwardLET.NewDepositCount - - // Check bounds - if !ndc.IsUint64() { - return fmt.Errorf("NewDepositCount=%s does not fit into uint64", ndc.String()) + newDepositCount := event.BackwardLET.NewDepositCount + if !newDepositCount.IsUint64() { + return fmt.Errorf("NewDepositCount=%d does not fit into uint64", newDepositCount) } - ndcU64 := ndc.Uint64() - if ndcU64 > math.MaxUint32 { - return fmt.Errorf("NewDepositCount=%d exceeds uint32 max (%d)", ndcU64, uint32(math.MaxUint32)) + newDepositCountU64 := newDepositCount.Uint64() + if newDepositCountU64 > math.MaxUint32 { + return fmt.Errorf("NewDepositCount=%d exceeds uint32 max (%d)", newDepositCountU64, uint32(math.MaxUint32)) } - ndcU32 := uint32(ndcU64) - - if err := p.exitTree.BackwardToIndex(ctx, tx, ndcU32); err != nil { - p.log.Errorf("failed to backward local exit tree to %d deposit count", ndcU32) + newDepositCountU32 := uint32(newDepositCountU64) + if err := p.exitTree.BackwardToIndex(ctx, tx, newDepositCountU32); err != nil { + p.log.Errorf("failed to backward local exit tree to %d deposit count", newDepositCountU32) return err } + + // remove all the bridges whose deposit_count is >= than the one captured by the BackwardLET event deleteBridges := 
fmt.Sprintf("DELETE from %s WHERE deposit_count >= $1", bridgeTableName) - _, err := tx.Exec(deleteBridges, ndc) + _, err := tx.Exec(deleteBridges, newDepositCount) if err != nil { p.log.Errorf("failed to remove bridges whose deposit count is greater than or equal to %d", event.BackwardLET.NewDepositCount) From b2dccc5562a03e384edb94c776ee106e6a8573bf Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Stefan=20Negovanovi=C4=87?= Date: Tue, 9 Dec 2025 11:14:08 +0100 Subject: [PATCH 09/73] remove bridges whose deposit count is greater than new deposit count --- bridgesync/processor.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/bridgesync/processor.go b/bridgesync/processor.go index 95b8d6861..ac4d63f3d 100644 --- a/bridgesync/processor.go +++ b/bridgesync/processor.go @@ -1366,8 +1366,8 @@ func (p *processor) ProcessBlock(ctx context.Context, block sync.Block) error { return err } - // remove all the bridges whose deposit_count is >= than the one captured by the BackwardLET event - deleteBridges := fmt.Sprintf("DELETE from %s WHERE deposit_count >= $1", bridgeTableName) + // remove all the bridges whose deposit_count is greater than the one captured by the BackwardLET event + deleteBridges := fmt.Sprintf("DELETE from %s WHERE deposit_count > $1", bridgeTableName) _, err := tx.Exec(deleteBridges, newDepositCount) if err != nil { p.log.Errorf("failed to remove bridges whose deposit count is greater than or equal to %d", From aa2cd0ce3f06fbf949a6510de77791fd4d94a0da Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Stefan=20Negovanovi=C4=87?= Date: Tue, 9 Dec 2025 11:15:44 +0100 Subject: [PATCH 10/73] remove InvalidClaim struct --- bridgesync/processor.go | 49 ----------------------------------------- 1 file changed, 49 deletions(-) diff --git a/bridgesync/processor.go b/bridgesync/processor.go index ac4d63f3d..185078595 100644 --- a/bridgesync/processor.go +++ b/bridgesync/processor.go @@ -350,55 +350,6 @@ func (c *Claim) decodePreEtrogCalldata(data []any) 
(bool, error) { return true, nil } -type InvalidClaim struct { - // claim struct fields - BlockNum uint64 `meddler:"block_num"` - BlockPos uint64 `meddler:"block_pos"` - TxHash common.Hash `meddler:"tx_hash,hash"` - GlobalIndex *big.Int `meddler:"global_index,bigint"` - OriginNetwork uint32 `meddler:"origin_network"` - OriginAddress common.Address `meddler:"origin_address"` - DestinationAddress common.Address `meddler:"destination_address"` - Amount *big.Int `meddler:"amount,bigint"` - ProofLocalExitRoot types.Proof `meddler:"proof_local_exit_root,merkleproof"` - ProofRollupExitRoot types.Proof `meddler:"proof_rollup_exit_root,merkleproof"` - MainnetExitRoot common.Hash `meddler:"mainnet_exit_root,hash"` - RollupExitRoot common.Hash `meddler:"rollup_exit_root,hash"` - GlobalExitRoot common.Hash `meddler:"global_exit_root,hash"` - DestinationNetwork uint32 `meddler:"destination_network"` - Metadata []byte `meddler:"metadata"` - IsMessage bool `meddler:"is_message"` - BlockTimestamp uint64 `meddler:"block_timestamp"` - // additional fields - Reason string `meddler:"reason"` - CreatedAt uint64 `meddler:"created_at"` -} - -// NewInvalidClaim creates a new InvalidClaim from a Claim and a reason -func NewInvalidClaim(c *Claim, reason string) *InvalidClaim { - return &InvalidClaim{ - BlockNum: c.BlockNum, - BlockPos: c.BlockPos, - TxHash: c.TxHash, - GlobalIndex: c.GlobalIndex, - OriginNetwork: c.OriginNetwork, - OriginAddress: c.OriginAddress, - DestinationAddress: c.DestinationAddress, - Amount: c.Amount, - ProofLocalExitRoot: c.ProofLocalExitRoot, - ProofRollupExitRoot: c.ProofRollupExitRoot, - MainnetExitRoot: c.MainnetExitRoot, - RollupExitRoot: c.RollupExitRoot, - GlobalExitRoot: c.GlobalExitRoot, - DestinationNetwork: c.DestinationNetwork, - Metadata: c.Metadata, - IsMessage: c.IsMessage, - BlockTimestamp: c.BlockTimestamp, - Reason: reason, - CreatedAt: uint64(time.Now().UTC().Unix()), - } -} - // TokenMapping representation of a NewWrappedToken event, that is 
emitted by the bridge contract type TokenMapping struct { BlockNum uint64 `meddler:"block_num"` From a9c7035df3d2adfeb8c82dbc4182c6e91c5a990b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Stefan=20Negovanovi=C4=87?= Date: Wed, 10 Dec 2025 08:55:31 +0100 Subject: [PATCH 11/73] correctly determine leaf index --- bridgesync/processor.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/bridgesync/processor.go b/bridgesync/processor.go index 185078595..2a077ff51 100644 --- a/bridgesync/processor.go +++ b/bridgesync/processor.go @@ -1311,9 +1311,9 @@ func (p *processor) ProcessBlock(ctx context.Context, block sync.Block) error { return fmt.Errorf("NewDepositCount=%d exceeds uint32 max (%d)", newDepositCountU64, uint32(math.MaxUint32)) } - newDepositCountU32 := uint32(newDepositCountU64) - if err := p.exitTree.BackwardToIndex(ctx, tx, newDepositCountU32); err != nil { - p.log.Errorf("failed to backward local exit tree to %d deposit count", newDepositCountU32) + leafIndex := uint32(newDepositCountU64 - 1) + if err := p.exitTree.BackwardToIndex(ctx, tx, leafIndex); err != nil { + p.log.Errorf("failed to backward local exit tree to %d deposit count", leafIndex) return err } From 44774fa916c3d72a3f37152c87a08163e24c3371 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Stefan=20Negovanovi=C4=87?= Date: Thu, 11 Dec 2025 14:53:49 +0100 Subject: [PATCH 12/73] archive bridges to bridge_archive table and handle reorgs --- bridgesync/migrations/bridgesync0011.sql | 67 ++++++++++++++++++++++-- bridgesync/processor.go | 50 ++++++++++++++++++ 2 files changed, 114 insertions(+), 3 deletions(-) diff --git a/bridgesync/migrations/bridgesync0011.sql b/bridgesync/migrations/bridgesync0011.sql index a1f5cc817..67df93dcf 100644 --- a/bridgesync/migrations/bridgesync0011.sql +++ b/bridgesync/migrations/bridgesync0011.sql @@ -1,9 +1,12 @@ -- +migrate Down DROP TABLE IF EXISTS backward_let; +DROP TRIGGER IF EXISTS archive_bridge_before_delete; + +DROP TABLE IF EXISTS bridge_archive; + 
-- +migrate Up -CREATE TABLE - backward_let ( +CREATE TABLE IF NOT EXISTS backward_let ( block_num INTEGER NOT NULL REFERENCES block (num) ON DELETE CASCADE, block_pos INTEGER NOT NULL, previous_deposit_count TEXT NOT NULL, @@ -11,4 +14,62 @@ CREATE TABLE new_deposit_count TEXT NOT NULL, new_root VARCHAR NOT NULL, PRIMARY KEY (block_num, block_pos) - ); \ No newline at end of file + ); + +------------------------------------------------------------------------------ +-- Create archive table +------------------------------------------------------------------------------ +CREATE TABLE IF NOT EXISTS bridge_archive ( + deposit_count INTEGER PRIMARY KEY, + block_num INTEGER NOT NULL, + block_pos INTEGER NOT NULL, + leaf_type INTEGER NOT NULL, + origin_network INTEGER NOT NULL, + origin_address VARCHAR NOT NULL, + destination_network INTEGER NOT NULL, + destination_address VARCHAR NOT NULL, + amount TEXT NOT NULL, + metadata BLOB, + tx_hash VARCHAR, + block_timestamp INTEGER, + txn_sender VARCHAR + ); + +------------------------------------------------------------------------------ +-- Create BEFORE DELETE trigger: archive only deleted rows +------------------------------------------------------------------------------ +CREATE TRIGGER IF NOT EXISTS archive_bridge_before_delete +BEFORE DELETE ON bridge +FOR EACH ROW +BEGIN + INSERT INTO bridge_archive ( + deposit_count, + block_num, + block_pos, + leaf_type, + origin_network, + origin_address, + destination_network, + destination_address, + amount, + metadata, + tx_hash, + block_timestamp, + txn_sender + ) + VALUES ( + OLD.deposit_count, + OLD.block_num, + OLD.block_pos, + OLD.leaf_type, + OLD.origin_network, + OLD.origin_address, + OLD.destination_network, + OLD.destination_address, + OLD.amount, + OLD.metadata, + OLD.tx_hash, + OLD.block_timestamp, + OLD.txn_sender + ); +END; \ No newline at end of file diff --git a/bridgesync/processor.go b/bridgesync/processor.go index 2a077ff51..8bf085f5c 100644 --- 
a/bridgesync/processor.go +++ b/bridgesync/processor.go @@ -121,6 +121,21 @@ var ( WHERE block_num >= $1 AND block_num <= $2 ORDER BY block_num ASC, block_pos ASC; `, bridgeTableName) + + // bridgeRestoreSQL is SQL query that moves rows back from bridge_archive to bridge table + bridgeRestoreSQL = fmt.Sprintf(` + INSERT INTO %s ( + block_num, block_pos, leaf_type, origin_network, origin_address, + destination_network, destination_address, amount, metadata, + tx_hash, block_timestamp, txn_sender, deposit_count + ) + SELECT + block_num, block_pos, leaf_type, origin_network, origin_address, + destination_network, destination_address, amount, metadata, + tx_hash, block_timestamp, txn_sender, deposit_count + FROM bridge_archive + WHERE deposit_count > $1 AND deposit_count <= $2 + `, bridgeTableName) ) // Bridge is the representation of a bridge event @@ -1172,11 +1187,46 @@ func (p *processor) Reorg(ctx context.Context, firstReorgedBlock uint64) error { } }() + // --------------------------------------------------------------------- + // 1. Load affected BackwardLETs BEFORE deleting blocks, bridges and BackwardLET entries + // --------------------------------------------------------------------- + backwardLETsQuery := ` + SELECT previous_deposit_count, new_deposit_count + FROM backward_let + WHERE block_num >= $1` + var backwardLETs []*BackwardLET + if err := meddler.QueryAll(tx, &backwardLETs, backwardLETsQuery, firstReorgedBlock); err != nil { + return fmt.Errorf("failed to retrieve the affected backward LETs: %w", err) + } + + // --------------------------------------------------------------------- + // 2. 
Restore bridge rows from archive for each interval + // --------------------------------------------------------------------- + for _, backwardLET := range backwardLETs { + if backwardLET.PreviousDepositCount.Cmp(backwardLET.NewDepositCount) <= 0 { + continue // malformed but safe to skip + } + + if _, err := tx.Exec(bridgeRestoreSQL, backwardLET.NewDepositCount, backwardLET.NewDepositCount); err != nil { + return fmt.Errorf("failed to restore bridges from bridge archive (range %d..%d): %w", + backwardLET.NewDepositCount, backwardLET.PreviousDepositCount, err) + } + + // Remove restored rows from archive + _, err := tx.Exec(`DELETE FROM bridge_archive + WHERE deposit_count > $1 AND deposit_count <= $2`, backwardLET.NewDepositCount, backwardLET.PreviousDepositCount) + if err != nil { + return fmt.Errorf("failed to delete restored rows from archive (range %d..%d): %w", + backwardLET.NewDepositCount, backwardLET.PreviousDepositCount, err) + } + } + blocksRes, err := tx.Exec(`DELETE FROM block WHERE num >= $1;`, firstReorgedBlock) if err != nil { p.log.Errorf("failed to delete blocks during reorg: %v", err) return err } + rowsAffected, err := blocksRes.RowsAffected() if err != nil { p.log.Errorf("failed to get rows affected during reorg: %v", err) From 275aed86b496f56e0decdf6b8ba7fdd0e7fee791 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Stefan=20Negovanovi=C4=87?= Date: Fri, 12 Dec 2025 07:08:35 +0100 Subject: [PATCH 13/73] increase code coverage, ignore double insertions to bridge_archive table --- bridgesync/downloader_test.go | 27 +++++++---- bridgesync/migrations/bridgesync0011.sql | 2 +- bridgesync/processor.go | 23 +++++---- bridgesync/processor_test.go | 59 ++++++++++++++++++------ 4 files changed, 79 insertions(+), 32 deletions(-) diff --git a/bridgesync/downloader_test.go b/bridgesync/downloader_test.go index b9bba2a30..b6974f120 100644 --- a/bridgesync/downloader_test.go +++ b/bridgesync/downloader_test.go @@ -55,6 +55,7 @@ func TestBuildAppender(t *testing.T) { 
eventSignature common.Hash deploymentKind BridgeDeployment logBuilder func() (types.Log, error) + expectedErr string }{ { name: "bridgeEventSignature appender", @@ -372,6 +373,12 @@ func TestBuildAppender(t *testing.T) { return l, nil }, }, + { + name: "unknown deployment kind", + deploymentKind: 100, + logBuilder: func() (types.Log, error) { return types.Log{}, nil }, + expectedErr: "unsupported bridge deployment kind: 100", + }, } for _, tt := range tests { @@ -382,17 +389,21 @@ func TestBuildAppender(t *testing.T) { logger := logger.WithFields("module", "test") bridgeDeployment.kind = tt.deploymentKind appenderMap, err := buildAppender(ethClient, bridgeAddr, false, bridgeDeployment, logger) - require.NoError(t, err) - require.NotNil(t, appenderMap) + if tt.expectedErr == "" { + require.NoError(t, err) + require.NotNil(t, appenderMap) - block := &sync.EVMBlock{EVMBlockHeader: sync.EVMBlockHeader{Num: blockNum}} + block := &sync.EVMBlock{EVMBlockHeader: sync.EVMBlockHeader{Num: blockNum}} - appenderFunc, exists := appenderMap[tt.eventSignature] - require.True(t, exists) + appenderFunc, exists := appenderMap[tt.eventSignature] + require.True(t, exists) - err = appenderFunc(block, log) - require.NoError(t, err) - require.Len(t, block.Events, 1) + err = appenderFunc(block, log) + require.NoError(t, err) + require.Len(t, block.Events, 1) + } else { + require.ErrorContains(t, err, tt.expectedErr) + } }) } } diff --git a/bridgesync/migrations/bridgesync0011.sql b/bridgesync/migrations/bridgesync0011.sql index 67df93dcf..83d107887 100644 --- a/bridgesync/migrations/bridgesync0011.sql +++ b/bridgesync/migrations/bridgesync0011.sql @@ -42,7 +42,7 @@ CREATE TRIGGER IF NOT EXISTS archive_bridge_before_delete BEFORE DELETE ON bridge FOR EACH ROW BEGIN - INSERT INTO bridge_archive ( + INSERT OR IGNORE INTO bridge_archive ( deposit_count, block_num, block_pos, diff --git a/bridgesync/processor.go b/bridgesync/processor.go index 8bf085f5c..866d720bc 100644 --- 
a/bridgesync/processor.go +++ b/bridgesync/processor.go @@ -1203,21 +1203,27 @@ func (p *processor) Reorg(ctx context.Context, firstReorgedBlock uint64) error { // 2. Restore bridge rows from archive for each interval // --------------------------------------------------------------------- for _, backwardLET := range backwardLETs { - if backwardLET.PreviousDepositCount.Cmp(backwardLET.NewDepositCount) <= 0 { - continue // malformed but safe to skip + if backwardLET.PreviousDepositCount == nil || !backwardLET.PreviousDepositCount.IsUint64() { + return fmt.Errorf("invalid previous deposit count: %d", backwardLET.PreviousDepositCount) } - if _, err := tx.Exec(bridgeRestoreSQL, backwardLET.NewDepositCount, backwardLET.NewDepositCount); err != nil { + if backwardLET.NewDepositCount == nil || !backwardLET.NewDepositCount.IsUint64() { + return fmt.Errorf("invalid new deposit count: %d", backwardLET.NewDepositCount) + } + + prevDepositCount := backwardLET.PreviousDepositCount.Uint64() + newDepositCount := backwardLET.NewDepositCount.Uint64() + if _, err := tx.Exec(bridgeRestoreSQL, newDepositCount, prevDepositCount); err != nil { return fmt.Errorf("failed to restore bridges from bridge archive (range %d..%d): %w", - backwardLET.NewDepositCount, backwardLET.PreviousDepositCount, err) + newDepositCount, prevDepositCount, err) } // Remove restored rows from archive _, err := tx.Exec(`DELETE FROM bridge_archive - WHERE deposit_count > $1 AND deposit_count <= $2`, backwardLET.NewDepositCount, backwardLET.PreviousDepositCount) + WHERE deposit_count > $1 AND deposit_count <= $2`, newDepositCount, prevDepositCount) if err != nil { return fmt.Errorf("failed to delete restored rows from archive (range %d..%d): %w", - backwardLET.NewDepositCount, backwardLET.PreviousDepositCount, err) + newDepositCount, prevDepositCount, err) } } @@ -1369,10 +1375,9 @@ func (p *processor) ProcessBlock(ctx context.Context, block sync.Block) error { // remove all the bridges whose 
deposit_count is greater than the one captured by the BackwardLET event deleteBridges := fmt.Sprintf("DELETE from %s WHERE deposit_count > $1", bridgeTableName) - _, err := tx.Exec(deleteBridges, newDepositCount) + _, err := tx.Exec(deleteBridges, newDepositCountU64) if err != nil { - p.log.Errorf("failed to remove bridges whose deposit count is greater than or equal to %d", - event.BackwardLET.NewDepositCount) + p.log.Errorf("failed to remove bridges whose deposit count is greater than %d", newDepositCountU64) return err } diff --git a/bridgesync/processor_test.go b/bridgesync/processor_test.go index 2d5766522..38ab0d4bf 100644 --- a/bridgesync/processor_test.go +++ b/bridgesync/processor_test.go @@ -20,6 +20,7 @@ import ( "github.com/0xPolygon/cdk-contracts-tooling/contracts/aggchain-multisig/polygonzkevmbridge" bridgetypes "github.com/agglayer/aggkit/bridgeservice/types" "github.com/agglayer/aggkit/bridgesync/migrations" + bridgesynctypes "github.com/agglayer/aggkit/bridgesync/types" "github.com/agglayer/aggkit/db" "github.com/agglayer/aggkit/log" "github.com/agglayer/aggkit/sync" @@ -320,6 +321,18 @@ func TestProcessor(t *testing.T) { eventsToClaims(block5.Events), )), }, + &reorgAction{ + p: p, + description: "reorg the last block", + firstReorgedBlock: 5, + }, + &getLastProcessedBlockAction{ + p: p, + description: "after last block reorged", + ctx: context.Background(), + expectedLastProcessedBlock: 4, + expectedErr: nil, + }, } for _, a := range actions { @@ -339,13 +352,13 @@ var ( Event{Bridge: &Bridge{ BlockNum: 1, BlockPos: 0, - LeafType: 1, + LeafType: bridgesynctypes.LeafTypeAsset.Uint8(), OriginNetwork: 1, - OriginAddress: common.HexToAddress("01"), + OriginAddress: common.HexToAddress("1"), DestinationNetwork: 1, - DestinationAddress: common.HexToAddress("01"), + DestinationAddress: common.HexToAddress("1"), Amount: big.NewInt(1), - Metadata: common.Hex2Bytes("01"), + Metadata: common.Hex2Bytes("1"), DepositCount: 0, }}, Event{Claim: &Claim{ @@ 
-353,8 +366,8 @@ var ( BlockPos: 1, GlobalIndex: big.NewInt(1), OriginNetwork: 1, - OriginAddress: common.HexToAddress("01"), - DestinationAddress: common.HexToAddress("01"), + OriginAddress: common.HexToAddress("1"), + DestinationAddress: common.HexToAddress("1"), Amount: big.NewInt(1), MainnetExitRoot: common.Hash{}, }}, @@ -390,27 +403,39 @@ var ( Event{Bridge: &Bridge{ BlockNum: 3, BlockPos: 0, - LeafType: 2, + LeafType: bridgesynctypes.LeafTypeAsset.Uint8(), OriginNetwork: 2, - OriginAddress: common.HexToAddress("02"), + OriginAddress: common.HexToAddress("2"), DestinationNetwork: 2, - DestinationAddress: common.HexToAddress("02"), + DestinationAddress: common.HexToAddress("2"), Amount: big.NewInt(2), - Metadata: common.Hex2Bytes("02"), + Metadata: common.Hex2Bytes("2"), DepositCount: 1, }}, Event{Bridge: &Bridge{ BlockNum: 3, BlockPos: 1, - LeafType: 3, + LeafType: bridgesynctypes.LeafTypeAsset.Uint8(), OriginNetwork: 3, - OriginAddress: common.HexToAddress("03"), + OriginAddress: common.HexToAddress("3"), DestinationNetwork: 3, - DestinationAddress: common.HexToAddress("03"), + DestinationAddress: common.HexToAddress("3"), Amount: big.NewInt(0), - Metadata: common.Hex2Bytes("03"), + Metadata: common.Hex2Bytes("3"), DepositCount: 2, }}, + Event{Bridge: &Bridge{ + BlockNum: 3, + BlockPos: 2, + LeafType: bridgesynctypes.LeafTypeAsset.Uint8(), + OriginNetwork: 3, + OriginAddress: common.HexToAddress("4"), + DestinationNetwork: 3, + DestinationAddress: common.HexToAddress("4"), + Amount: big.NewInt(0), + Metadata: common.Hex2Bytes("4"), + DepositCount: 3, + }}, }, } block4 = sync.Block{ @@ -453,6 +478,12 @@ var ( BlockPos: 3, LegacyTokenAddress: common.HexToAddress("0x11"), }}, + Event{BackwardLET: &BackwardLET{ + BlockNum: 5, + BlockPos: 4, + PreviousDepositCount: big.NewInt(3), + NewDepositCount: big.NewInt(2), + }}, }, } ) From ecc3ded4e40de10fff8725f4a8dc693bf6cc1fff Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Stefan=20Negovanovi=C4=87?= Date: Fri, 12 Dec 2025 
12:12:55 +0100 Subject: [PATCH 14/73] address 1st round of comments --- bridgesync/migrations/bridgesync0011.sql | 13 +++++----- bridgesync/processor.go | 32 +++++++++++++++++++----- 2 files changed, 33 insertions(+), 12 deletions(-) diff --git a/bridgesync/migrations/bridgesync0011.sql b/bridgesync/migrations/bridgesync0011.sql index 670cea865..f61111edc 100644 --- a/bridgesync/migrations/bridgesync0011.sql +++ b/bridgesync/migrations/bridgesync0011.sql @@ -1,9 +1,7 @@ -- +migrate Down -DROP TABLE IF EXISTS backward_let; - DROP TRIGGER IF EXISTS archive_bridge_before_delete; - DROP TABLE IF EXISTS bridge_archive; +DROP TABLE IF EXISTS backward_let; -- +migrate Up UPDATE bridge @@ -45,7 +43,8 @@ CREATE TABLE IF NOT EXISTS bridge_archive ( metadata BLOB, tx_hash VARCHAR, block_timestamp INTEGER, - txn_sender VARCHAR + txn_sender VARCHAR, + from_address VARCHAR ); ------------------------------------------------------------------------------ @@ -68,7 +67,8 @@ BEGIN metadata, tx_hash, block_timestamp, - txn_sender + txn_sender, + from_address, ) VALUES ( OLD.deposit_count, @@ -83,6 +83,7 @@ BEGIN OLD.metadata, OLD.tx_hash, OLD.block_timestamp, - OLD.txn_sender + OLD.txn_sender, + OLD.from_address, ); END; diff --git a/bridgesync/processor.go b/bridgesync/processor.go index 866d720bc..4f20e498d 100644 --- a/bridgesync/processor.go +++ b/bridgesync/processor.go @@ -127,12 +127,12 @@ var ( INSERT INTO %s ( block_num, block_pos, leaf_type, origin_network, origin_address, destination_network, destination_address, amount, metadata, - tx_hash, block_timestamp, txn_sender, deposit_count + tx_hash, block_timestamp, txn_sender, deposit_count, from_address ) SELECT block_num, block_pos, leaf_type, origin_network, origin_address, destination_network, destination_address, amount, metadata, - tx_hash, block_timestamp, txn_sender, deposit_count + tx_hash, block_timestamp, txn_sender, deposit_count, from_address FROM bridge_archive WHERE deposit_count > $1 AND deposit_count <= $2 `, 
bridgeTableName) @@ -485,6 +485,21 @@ type BackwardLET struct { NewRoot common.Hash `meddler:"new_root,hash"` } +// String returns a formatted string representation of BackwardLET for debugging and logging. +func (b *BackwardLET) String() string { + previousDepositCountStr := nilStr + if b.PreviousDepositCount != nil { + previousDepositCountStr = b.PreviousDepositCount.String() + } + newDepositCountStr := nilStr + if b.NewDepositCount != nil { + newDepositCountStr = b.NewDepositCount.String() + } + return fmt.Sprintf("BackwardLET{BlockNum: %d, BlockPos: %d, "+ + "PreviousDepositCount: %s, PreviousRoot: %s, NewDepositCount: %s, NewRoot: %s}", + b.BlockNum, b.BlockPos, previousDepositCountStr, b.PreviousRoot.String(), newDepositCountStr, b.NewRoot.String()) +} + // Event combination of bridge, claim, token mapping and legacy token migration events type Event struct { Bridge *Bridge @@ -1207,7 +1222,7 @@ func (p *processor) Reorg(ctx context.Context, firstReorgedBlock uint64) error { return fmt.Errorf("invalid previous deposit count: %d", backwardLET.PreviousDepositCount) } - if backwardLET.PreviousDepositCount == nil || !backwardLET.PreviousDepositCount.IsUint64() { + if backwardLET.NewDepositCount == nil || !backwardLET.NewDepositCount.IsUint64() { return fmt.Errorf("invalid new deposit count: %d", backwardLET.NewDepositCount) } @@ -1359,17 +1374,22 @@ func (p *processor) ProcessBlock(ctx context.Context, block sync.Block) error { if event.BackwardLET != nil { newDepositCount := event.BackwardLET.NewDepositCount if !newDepositCount.IsUint64() { - return fmt.Errorf("NewDepositCount=%d does not fit into uint64", newDepositCount) + return fmt.Errorf("new deposit count=%d does not fit into uint64", newDepositCount) } newDepositCountU64 := newDepositCount.Uint64() if newDepositCountU64 > math.MaxUint32 { - return fmt.Errorf("NewDepositCount=%d exceeds uint32 max (%d)", newDepositCountU64, uint32(math.MaxUint32)) + return fmt.Errorf("new deposit count=%d exceeds uint32 
max (%d)", newDepositCountU64, uint32(math.MaxUint32)) + } + + if newDepositCountU64 == 0 { + return fmt.Errorf("new deposit count must be at least 1 to compute the leaf index, got 0") } leafIndex := uint32(newDepositCountU64 - 1) if err := p.exitTree.BackwardToIndex(ctx, tx, leafIndex); err != nil { - p.log.Errorf("failed to backward local exit tree to %d deposit count", leafIndex) + p.log.Errorf("failed to backward local exit tree to leaf index %d (deposit count: %d)", + leafIndex, newDepositCountU64) return err } From f98e3507242d236a660ee6247fd72d253565ebd2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Stefan=20Negovanovi=C4=87?= Date: Fri, 12 Dec 2025 12:17:58 +0100 Subject: [PATCH 15/73] reverse the steps, first delete the bridges and then update the exit tree accordingly --- bridgesync/processor.go | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/bridgesync/processor.go b/bridgesync/processor.go index 4f20e498d..ddedd3252 100644 --- a/bridgesync/processor.go +++ b/bridgesync/processor.go @@ -1386,13 +1386,6 @@ func (p *processor) ProcessBlock(ctx context.Context, block sync.Block) error { return fmt.Errorf("new deposit count must be at least 1 to compute the leaf index, got 0") } - leafIndex := uint32(newDepositCountU64 - 1) - if err := p.exitTree.BackwardToIndex(ctx, tx, leafIndex); err != nil { - p.log.Errorf("failed to backward local exit tree to leaf index %d (deposit count: %d)", - leafIndex, newDepositCountU64) - return err - } - // remove all the bridges whose deposit_count is greater than the one captured by the BackwardLET event deleteBridges := fmt.Sprintf("DELETE from %s WHERE deposit_count > $1", bridgeTableName) _, err := tx.Exec(deleteBridges, newDepositCountU64) @@ -1401,6 +1394,14 @@ func (p *processor) ProcessBlock(ctx context.Context, block sync.Block) error { return err } + // remove all the indices after the provided leafIndex in the exit tree + leafIndex := uint32(newDepositCountU64 - 1) + if err := 
p.exitTree.BackwardToIndex(ctx, tx, leafIndex); err != nil { + p.log.Errorf("failed to backward local exit tree to leaf index %d (deposit count: %d)", + leafIndex, newDepositCountU64) + return err + } + if err = meddler.Insert(tx, backwardLETTableName, event.BackwardLET); err != nil { p.log.Errorf("failed to insert backward local exit tree event at block %d: %v", block.Num, err) return err From a74ca6e4166762a4dd8ca8519e18d29678d039e5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Stefan=20Negovanovi=C4=87?= <93934272+Stefan-Ethernal@users.noreply.github.com> Date: Fri, 12 Dec 2025 13:48:19 +0100 Subject: [PATCH 16/73] Apply suggestions from code review Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --- bridgesync/processor.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bridgesync/processor.go b/bridgesync/processor.go index ddedd3252..326c52839 100644 --- a/bridgesync/processor.go +++ b/bridgesync/processor.go @@ -1374,7 +1374,7 @@ func (p *processor) ProcessBlock(ctx context.Context, block sync.Block) error { if event.BackwardLET != nil { newDepositCount := event.BackwardLET.NewDepositCount if !newDepositCount.IsUint64() { - return fmt.Errorf("new deposit count=%d does not fit into uint64", newDepositCount) + return fmt.Errorf("new deposit count=%s does not fit into uint64", newDepositCount) } newDepositCountU64 := newDepositCount.Uint64() From b2035c272bc723c6961e36146baec7c3a1f20601 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Stefan=20Negovanovi=C4=87?= Date: Fri, 12 Dec 2025 13:48:39 +0100 Subject: [PATCH 17/73] fix sql syntax error --- bridgesync/migrations/bridgesync0011.sql | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bridgesync/migrations/bridgesync0011.sql b/bridgesync/migrations/bridgesync0011.sql index f61111edc..0600c3a77 100644 --- a/bridgesync/migrations/bridgesync0011.sql +++ b/bridgesync/migrations/bridgesync0011.sql @@ -84,6 +84,6 @@ BEGIN OLD.tx_hash, OLD.block_timestamp, OLD.txn_sender, - 
OLD.from_address, + OLD.from_address ); END; From 38f1fbefddeb46b9c4cb0d11282d120f51be00d3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Stefan=20Negovanovi=C4=87?= Date: Fri, 12 Dec 2025 13:56:03 +0100 Subject: [PATCH 18/73] leaf index is the same as deposit count --- bridgesync/processor.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bridgesync/processor.go b/bridgesync/processor.go index 326c52839..d5c839034 100644 --- a/bridgesync/processor.go +++ b/bridgesync/processor.go @@ -1395,7 +1395,7 @@ func (p *processor) ProcessBlock(ctx context.Context, block sync.Block) error { } // remove all the indices after the provided leafIndex in the exit tree - leafIndex := uint32(newDepositCountU64 - 1) + leafIndex := uint32(newDepositCountU64) if err := p.exitTree.BackwardToIndex(ctx, tx, leafIndex); err != nil { p.log.Errorf("failed to backward local exit tree to leaf index %d (deposit count: %d)", leafIndex, newDepositCountU64) From 752f986c62d02fdce4a32582903cde397daa18ea Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Stefan=20Negovanovi=C4=87?= Date: Fri, 12 Dec 2025 14:00:18 +0100 Subject: [PATCH 19/73] invoke BackwardLET string function --- bridgesync/processor.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/bridgesync/processor.go b/bridgesync/processor.go index d5c839034..466362666 100644 --- a/bridgesync/processor.go +++ b/bridgesync/processor.go @@ -535,6 +535,9 @@ func (e Event) String() string { if e.SetClaim != nil { parts = append(parts, e.SetClaim.String()) } + if e.BackwardLET != nil { + parts = append(parts, e.BackwardLET.String()) + } return "Event{" + strings.Join(parts, ", ") + "}" } From 7c941abf0110bbbee5122f57daaa9d41a80bd5b2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Stefan=20Negovanovi=C4=87?= Date: Fri, 12 Dec 2025 14:07:06 +0100 Subject: [PATCH 20/73] remove redundant check --- bridgesync/processor.go | 4 ---- 1 file changed, 4 deletions(-) diff --git a/bridgesync/processor.go b/bridgesync/processor.go index 466362666..e8e09e37c 
100644 --- a/bridgesync/processor.go +++ b/bridgesync/processor.go @@ -1385,10 +1385,6 @@ func (p *processor) ProcessBlock(ctx context.Context, block sync.Block) error { return fmt.Errorf("new deposit count=%d exceeds uint32 max (%d)", newDepositCountU64, uint32(math.MaxUint32)) } - if newDepositCountU64 == 0 { - return fmt.Errorf("new deposit count must be at least 1 to compute the leaf index, got 0") - } - // remove all the bridges whose deposit_count is greater than the one captured by the BackwardLET event deleteBridges := fmt.Sprintf("DELETE from %s WHERE deposit_count > $1", bridgeTableName) _, err := tx.Exec(deleteBridges, newDepositCountU64) From 7ffebfdbf809882fb3a3a6b65486a45be6f2cccf Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Stefan=20Negovanovi=C4=87?= Date: Fri, 12 Dec 2025 14:19:55 +0100 Subject: [PATCH 21/73] fix sql syntax in migration file (2nd part) --- bridgesync/migrations/bridgesync0011.sql | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bridgesync/migrations/bridgesync0011.sql b/bridgesync/migrations/bridgesync0011.sql index 0600c3a77..848260eee 100644 --- a/bridgesync/migrations/bridgesync0011.sql +++ b/bridgesync/migrations/bridgesync0011.sql @@ -68,7 +68,7 @@ BEGIN tx_hash, block_timestamp, txn_sender, - from_address, + from_address ) VALUES ( OLD.deposit_count, From 2b1ce28a650a35624e7b89f66cf825a7b544946c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Stefan=20Negovanovi=C4=87?= Date: Mon, 15 Dec 2025 09:35:02 +0100 Subject: [PATCH 22/73] reinsert exit tree leaves on reorg of backward let event --- bridgesync/processor.go | 71 ++++++++++++++++++++++++++++------------- common/common.go | 23 +++++++++++++ 2 files changed, 71 insertions(+), 23 deletions(-) diff --git a/bridgesync/processor.go b/bridgesync/processor.go index e8e09e37c..0c10e4113 100644 --- a/bridgesync/processor.go +++ b/bridgesync/processor.go @@ -6,7 +6,6 @@ import ( "encoding/binary" "errors" "fmt" - "math" "math/big" "regexp" "strings" @@ -561,6 +560,7 @@ func 
(b BridgeSyncRuntimeData) String() string { } return res } + func (b BridgeSyncRuntimeData) IsCompatible(storage BridgeSyncRuntimeData) error { tmp := sync.RuntimeData{ ChainID: b.ChainID, @@ -1220,25 +1220,50 @@ func (p *processor) Reorg(ctx context.Context, firstReorgedBlock uint64) error { // --------------------------------------------------------------------- // 2. Restore bridge rows from archive for each interval // --------------------------------------------------------------------- + restoredBridgesQuery := `SELECT * FROM bridge WHERE deposit_count > $1 AND deposit_count <= $2` for _, backwardLET := range backwardLETs { - if backwardLET.PreviousDepositCount == nil || !backwardLET.PreviousDepositCount.IsUint64() { - return fmt.Errorf("invalid previous deposit count: %d", backwardLET.PreviousDepositCount) + prevDepositCount, err := aggkitcommon.SafeUint64(backwardLET.PreviousDepositCount) + if err != nil { + return fmt.Errorf("invalid previous deposit count %s: %w", backwardLET.PreviousDepositCount, err) } - if backwardLET.NewDepositCount == nil || !backwardLET.NewDepositCount.IsUint64() { - return fmt.Errorf("invalid new deposit count: %d", backwardLET.NewDepositCount) + newDepositCount, err := aggkitcommon.SafeUint64(backwardLET.NewDepositCount) + if err != nil { + return fmt.Errorf("invalid new deposit count %s: %w", backwardLET.NewDepositCount, err) } - prevDepositCount := backwardLET.PreviousDepositCount.Uint64() - newDepositCount := backwardLET.NewDepositCount.Uint64() if _, err := tx.Exec(bridgeRestoreSQL, newDepositCount, prevDepositCount); err != nil { - return fmt.Errorf("failed to restore bridges from bridge archive (range %d..%d): %w", + return fmt.Errorf("failed to restore bridges from bridge archive (deposit counts range: %d..%d): %w", + newDepositCount, prevDepositCount, err) + } + + // --------------------------------------------------------------------- + // 3. 
Restore bridges in the exit tree + // --------------------------------------------------------------------- + var restoredBridges []*Bridge + err = meddler.QueryAll(tx, &restoredBridges, restoredBridgesQuery, newDepositCount, prevDepositCount) + if err != nil { + return fmt.Errorf("failed to retrieve the restored bridges (deposit counts range: %d..%d): %w", newDepositCount, prevDepositCount, err) } - // Remove restored rows from archive - _, err := tx.Exec(`DELETE FROM bridge_archive - WHERE deposit_count > $1 AND deposit_count <= $2`, newDepositCount, prevDepositCount) + for _, restoredBridge := range restoredBridges { + if _, err = p.exitTree.PutLeaf(tx, restoredBridge.BlockNum, restoredBridge.BlockPos, types.Leaf{ + Index: restoredBridge.DepositCount, + Hash: restoredBridge.Hash(), + }); err != nil { + if errors.Is(err, tree.ErrInvalidIndex) { + p.halt(fmt.Sprintf("error adding leaf to the exit tree: %v", err)) + } + return sync.ErrInconsistentState + } + } + + // --------------------------------------------------------------------- + // 4. 
Remove restored bridges from the bridge_archive table + // --------------------------------------------------------------------- + _, err = tx.Exec(`DELETE FROM bridge_archive WHERE deposit_count > $1 AND deposit_count <= $2`, + newDepositCount, prevDepositCount) if err != nil { return fmt.Errorf("failed to delete restored rows from archive (range %d..%d): %w", newDepositCount, prevDepositCount, err) @@ -1375,32 +1400,32 @@ func (p *processor) ProcessBlock(ctx context.Context, block sync.Block) error { } if event.BackwardLET != nil { - newDepositCount := event.BackwardLET.NewDepositCount - if !newDepositCount.IsUint64() { - return fmt.Errorf("new deposit count=%s does not fit into uint64", newDepositCount) - } - - newDepositCountU64 := newDepositCount.Uint64() - if newDepositCountU64 > math.MaxUint32 { - return fmt.Errorf("new deposit count=%d exceeds uint32 max (%d)", newDepositCountU64, uint32(math.MaxUint32)) + newDepositCountU64, err := aggkitcommon.SafeUint64(event.BackwardLET.NewDepositCount) + if err != nil { + return fmt.Errorf("failed to convert new deposit count to uint64: %w", err) } - // remove all the bridges whose deposit_count is greater than the one captured by the BackwardLET event + // 1. remove all the bridges whose deposit_count is greater than the one captured by the BackwardLET event deleteBridges := fmt.Sprintf("DELETE from %s WHERE deposit_count > $1", bridgeTableName) - _, err := tx.Exec(deleteBridges, newDepositCountU64) + _, err = tx.Exec(deleteBridges, newDepositCountU64) if err != nil { p.log.Errorf("failed to remove bridges whose deposit count is greater than %d", newDepositCountU64) return err } - // remove all the indices after the provided leafIndex in the exit tree - leafIndex := uint32(newDepositCountU64) + // 2. 
remove all leafs from the exit tree with indices greater than leafIndex in the exit tree + leafIndex, err := aggkitcommon.SafeUint32(newDepositCountU64) + if err != nil { + return fmt.Errorf("failed to convert new deposit count (uint64) to leaf index (uint32): %w", + err) + } if err := p.exitTree.BackwardToIndex(ctx, tx, leafIndex); err != nil { p.log.Errorf("failed to backward local exit tree to leaf index %d (deposit count: %d)", leafIndex, newDepositCountU64) return err } + // 3. insert the backward let event to designated table if err = meddler.Insert(tx, backwardLETTableName, event.BackwardLET); err != nil { p.log.Errorf("failed to insert backward local exit tree event at block %d: %v", block.Num, err) return err diff --git a/common/common.go b/common/common.go index 957b40a28..9a2f0cf42 100644 --- a/common/common.go +++ b/common/common.go @@ -3,6 +3,7 @@ package common import ( "crypto/ecdsa" "encoding/binary" + "errors" "fmt" "math" "math/big" @@ -186,3 +187,25 @@ func ParseUint64HexOrDecimal(str string) (uint64, error) { } return num, nil } + +// SafeUint64 converts big.Int into uint64, if it fits into it. +// Otherwise it returns an error. +func SafeUint64(i *big.Int) (uint64, error) { + if i == nil { + return 0, errors.New("value is undefined") + } + + if !i.IsUint64() { + return 0, fmt.Errorf("value=%v does not fit in uint64", i) + } + return i.Uint64(), nil +} + +// SafeUint32 downcasts the provided uint64 value to uint32, if it fits into it. +// Otherwise it returns an error. 
+func SafeUint32(v uint64) (uint32, error) { + if v > math.MaxUint32 { + return 0, fmt.Errorf("value=%d exceeds uint32 max (%d)", v, uint32(math.MaxUint32)) + } + return uint32(v), nil +} From ba085089b0fdd66133cc362fc3ca5d1416baf5e9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Stefan=20Negovanovi=C4=87?= Date: Mon, 15 Dec 2025 09:56:22 +0100 Subject: [PATCH 23/73] add bridge source column and add ordering by deposit_count --- bridgesync/migrations/bridgesync0011.sql | 3 ++ bridgesync/processor.go | 45 ++++++++++++++---------- 2 files changed, 30 insertions(+), 18 deletions(-) diff --git a/bridgesync/migrations/bridgesync0011.sql b/bridgesync/migrations/bridgesync0011.sql index 848260eee..c3d2e1f45 100644 --- a/bridgesync/migrations/bridgesync0011.sql +++ b/bridgesync/migrations/bridgesync0011.sql @@ -2,6 +2,7 @@ DROP TRIGGER IF EXISTS archive_bridge_before_delete; DROP TABLE IF EXISTS bridge_archive; DROP TABLE IF EXISTS backward_let; +ALTER TABLE bridge DROP COLUMN source; -- +migrate Up UPDATE bridge @@ -87,3 +88,5 @@ BEGIN OLD.from_address ); END; + +ALTER TABLE bridge ADD COLUMN source TEXT; diff --git a/bridgesync/processor.go b/bridgesync/processor.go index 0c10e4113..5dffaf4e6 100644 --- a/bridgesync/processor.go +++ b/bridgesync/processor.go @@ -120,21 +120,13 @@ var ( WHERE block_num >= $1 AND block_num <= $2 ORDER BY block_num ASC, block_pos ASC; `, bridgeTableName) +) - // bridgeRestoreSQL is SQL query that moves rows back from bridge_archive to bridge table - bridgeRestoreSQL = fmt.Sprintf(` - INSERT INTO %s ( - block_num, block_pos, leaf_type, origin_network, origin_address, - destination_network, destination_address, amount, metadata, - tx_hash, block_timestamp, txn_sender, deposit_count, from_address - ) - SELECT - block_num, block_pos, leaf_type, origin_network, origin_address, - destination_network, destination_address, amount, metadata, - tx_hash, block_timestamp, txn_sender, deposit_count, from_address - FROM bridge_archive - WHERE deposit_count > 
$1 AND deposit_count <= $2 - `, bridgeTableName) +type BridgeSource string + +const ( + BridgeSourceBackwardLET BridgeSource = "backward_let" + BridgeSourceForwardLET BridgeSource = "forward_let" ) // Bridge is the representation of a bridge event @@ -153,6 +145,7 @@ type Bridge struct { Metadata []byte `meddler:"metadata"` DepositCount uint32 `meddler:"deposit_count"` TxnSender common.Address `meddler:"txn_sender,address"` + Source BridgeSource `meddler:"source"` } func (b *Bridge) String() string { @@ -163,11 +156,11 @@ func (b *Bridge) String() string { return fmt.Sprintf("Bridge{BlockNum: %d, BlockPos: %d, FromAddress: %s, TxHash: %s, "+ "BlockTimestamp: %d, LeafType: %d, OriginNetwork: %d, OriginAddress: %s, "+ "DestinationNetwork: %d, DestinationAddress: %s, Amount: %s, Metadata: %x, "+ - "DepositCount: %d, TxnSender: %s}", + "DepositCount: %d, TxnSender: %s, Source: %s}", b.BlockNum, b.BlockPos, b.FromAddress.String(), b.TxHash.String(), b.BlockTimestamp, b.LeafType, b.OriginNetwork, b.OriginAddress.String(), b.DestinationNetwork, b.DestinationAddress.String(), amountStr, b.Metadata, - b.DepositCount, b.TxnSender.String()) + b.DepositCount, b.TxnSender.String(), b.Source) } // Hash returns the hash of the bridge event as expected by the exit tree @@ -1220,7 +1213,23 @@ func (p *processor) Reorg(ctx context.Context, firstReorgedBlock uint64) error { // --------------------------------------------------------------------- // 2. 
Restore bridge rows from archive for each interval // --------------------------------------------------------------------- - restoredBridgesQuery := `SELECT * FROM bridge WHERE deposit_count > $1 AND deposit_count <= $2` + restoredBridgesQuery := `SELECT * FROM bridge + WHERE deposit_count > $1 AND deposit_count <= $2 + ORDER BY deposit_count ASC;` + bridgeRestorationSQL := ` + INSERT INTO bridge ( + block_num, block_pos, leaf_type, origin_network, origin_address, + destination_network, destination_address, amount, metadata, + tx_hash, block_timestamp, txn_sender, deposit_count, from_address, source + ) + SELECT + block_num, block_pos, leaf_type, origin_network, origin_address, + destination_network, destination_address, amount, metadata, + tx_hash, block_timestamp, txn_sender, deposit_count, from_address, $1 + FROM bridge_archive + WHERE deposit_count > $2 AND deposit_count <= $3 + ORDER BY deposit_count ASC; + ` for _, backwardLET := range backwardLETs { prevDepositCount, err := aggkitcommon.SafeUint64(backwardLET.PreviousDepositCount) if err != nil { @@ -1232,7 +1241,7 @@ func (p *processor) Reorg(ctx context.Context, firstReorgedBlock uint64) error { return fmt.Errorf("invalid new deposit count %s: %w", backwardLET.NewDepositCount, err) } - if _, err := tx.Exec(bridgeRestoreSQL, newDepositCount, prevDepositCount); err != nil { + if _, err := tx.Exec(bridgeRestorationSQL, BridgeSourceBackwardLET, newDepositCount, prevDepositCount); err != nil { return fmt.Errorf("failed to restore bridges from bridge archive (deposit counts range: %d..%d): %w", newDepositCount, prevDepositCount, err) } From e4c8f62763ce8da0303f6dfecaff52646617a0a3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Stefan=20Negovanovi=C4=87?= Date: Mon, 15 Dec 2025 12:21:51 +0100 Subject: [PATCH 24/73] move the new sql migrations to bridgesync0012 migration file --- bridgesync/migrations/bridgesync0011.sql | 77 ----------------------- bridgesync/migrations/bridgesync0012.sql | 79 ++++++++++++++++++++++++ 
2 files changed, 79 insertions(+), 77 deletions(-) create mode 100644 bridgesync/migrations/bridgesync0012.sql diff --git a/bridgesync/migrations/bridgesync0011.sql b/bridgesync/migrations/bridgesync0011.sql index c3d2e1f45..6747e9702 100644 --- a/bridgesync/migrations/bridgesync0011.sql +++ b/bridgesync/migrations/bridgesync0011.sql @@ -1,8 +1,4 @@ -- +migrate Down -DROP TRIGGER IF EXISTS archive_bridge_before_delete; -DROP TABLE IF EXISTS bridge_archive; -DROP TABLE IF EXISTS backward_let; -ALTER TABLE bridge DROP COLUMN source; -- +migrate Up UPDATE bridge @@ -17,76 +13,3 @@ SET from_address = NULL WHERE tx_hash IN ( SELECT tx_hash FROM bridge GROUP BY tx_hash HAVING COUNT(*) > 1 ); - -CREATE TABLE IF NOT EXISTS backward_let ( - block_num INTEGER NOT NULL REFERENCES block (num) ON DELETE CASCADE, - block_pos INTEGER NOT NULL, - previous_deposit_count TEXT NOT NULL, - previous_root VARCHAR NOT NULL, - new_deposit_count TEXT NOT NULL, - new_root VARCHAR NOT NULL, - PRIMARY KEY (block_num, block_pos) - ); - ------------------------------------------------------------------------------- --- Create archive table ------------------------------------------------------------------------------- -CREATE TABLE IF NOT EXISTS bridge_archive ( - deposit_count INTEGER PRIMARY KEY, - block_num INTEGER NOT NULL, - block_pos INTEGER NOT NULL, - leaf_type INTEGER NOT NULL, - origin_network INTEGER NOT NULL, - origin_address VARCHAR NOT NULL, - destination_network INTEGER NOT NULL, - destination_address VARCHAR NOT NULL, - amount TEXT NOT NULL, - metadata BLOB, - tx_hash VARCHAR, - block_timestamp INTEGER, - txn_sender VARCHAR, - from_address VARCHAR - ); - ------------------------------------------------------------------------------- --- Create BEFORE DELETE trigger: archive only deleted rows ------------------------------------------------------------------------------- -CREATE TRIGGER IF NOT EXISTS archive_bridge_before_delete -BEFORE DELETE ON bridge -FOR EACH ROW -BEGIN - 
INSERT OR IGNORE INTO bridge_archive ( - deposit_count, - block_num, - block_pos, - leaf_type, - origin_network, - origin_address, - destination_network, - destination_address, - amount, - metadata, - tx_hash, - block_timestamp, - txn_sender, - from_address - ) - VALUES ( - OLD.deposit_count, - OLD.block_num, - OLD.block_pos, - OLD.leaf_type, - OLD.origin_network, - OLD.origin_address, - OLD.destination_network, - OLD.destination_address, - OLD.amount, - OLD.metadata, - OLD.tx_hash, - OLD.block_timestamp, - OLD.txn_sender, - OLD.from_address - ); -END; - -ALTER TABLE bridge ADD COLUMN source TEXT; diff --git a/bridgesync/migrations/bridgesync0012.sql b/bridgesync/migrations/bridgesync0012.sql new file mode 100644 index 000000000..1b8735ad6 --- /dev/null +++ b/bridgesync/migrations/bridgesync0012.sql @@ -0,0 +1,79 @@ +-- +migrate Down +DROP TRIGGER IF EXISTS archive_bridge_before_delete; +DROP TABLE IF EXISTS bridge_archive; +DROP TABLE IF EXISTS backward_let; +ALTER TABLE bridge DROP COLUMN source; + +-- +migrate Up +CREATE TABLE IF NOT EXISTS backward_let ( + block_num INTEGER NOT NULL REFERENCES block (num) ON DELETE CASCADE, + block_pos INTEGER NOT NULL, + previous_deposit_count TEXT NOT NULL, + previous_root VARCHAR NOT NULL, + new_deposit_count TEXT NOT NULL, + new_root VARCHAR NOT NULL, + PRIMARY KEY (block_num, block_pos) + ); + +------------------------------------------------------------------------------ +-- Create bridge_archive table +------------------------------------------------------------------------------ +CREATE TABLE IF NOT EXISTS bridge_archive ( + deposit_count INTEGER PRIMARY KEY, + block_num INTEGER NOT NULL, + block_pos INTEGER NOT NULL, + leaf_type INTEGER NOT NULL, + origin_network INTEGER NOT NULL, + origin_address VARCHAR NOT NULL, + destination_network INTEGER NOT NULL, + destination_address VARCHAR NOT NULL, + amount TEXT NOT NULL, + metadata BLOB, + tx_hash VARCHAR, + block_timestamp INTEGER, + txn_sender VARCHAR, + from_address 
VARCHAR + ); + +------------------------------------------------------------------------------ +-- Create BEFORE DELETE trigger: archive only deleted rows +------------------------------------------------------------------------------ +CREATE TRIGGER IF NOT EXISTS archive_bridge_before_delete +BEFORE DELETE ON bridge +FOR EACH ROW +BEGIN + INSERT OR IGNORE INTO bridge_archive ( + deposit_count, + block_num, + block_pos, + leaf_type, + origin_network, + origin_address, + destination_network, + destination_address, + amount, + metadata, + tx_hash, + block_timestamp, + txn_sender, + from_address + ) + VALUES ( + OLD.deposit_count, + OLD.block_num, + OLD.block_pos, + OLD.leaf_type, + OLD.origin_network, + OLD.origin_address, + OLD.destination_network, + OLD.destination_address, + OLD.amount, + OLD.metadata, + OLD.tx_hash, + OLD.block_timestamp, + OLD.txn_sender, + OLD.from_address + ); +END; + +ALTER TABLE bridge ADD COLUMN source TEXT; \ No newline at end of file From 3600cd61286c5a0a3bcc36fc75043b5bae56bcb9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Stefan=20Negovanovi=C4=87?= Date: Mon, 15 Dec 2025 12:54:22 +0100 Subject: [PATCH 25/73] formatting --- bridgesync/processor.go | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/bridgesync/processor.go b/bridgesync/processor.go index 5dffaf4e6..19ee6f6e0 100644 --- a/bridgesync/processor.go +++ b/bridgesync/processor.go @@ -1257,10 +1257,11 @@ func (p *processor) Reorg(ctx context.Context, firstReorgedBlock uint64) error { } for _, restoredBridge := range restoredBridges { - if _, err = p.exitTree.PutLeaf(tx, restoredBridge.BlockNum, restoredBridge.BlockPos, types.Leaf{ - Index: restoredBridge.DepositCount, - Hash: restoredBridge.Hash(), - }); err != nil { + if _, err = p.exitTree.PutLeaf(tx, restoredBridge.BlockNum, restoredBridge.BlockPos, + types.Leaf{ + Index: restoredBridge.DepositCount, + Hash: restoredBridge.Hash(), + }); err != nil { if errors.Is(err, tree.ErrInvalidIndex) { 
p.halt(fmt.Sprintf("error adding leaf to the exit tree: %v", err)) } From c900f0af30f5166e77431131d8cf68c44ff3f54e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Stefan=20Negovanovi=C4=87?= Date: Tue, 16 Dec 2025 15:30:39 +0100 Subject: [PATCH 26/73] TestProcessor_BackwardLET --- bridgesync/processor_test.go | 236 +++++++++++++++++++++++++++++++++-- 1 file changed, 226 insertions(+), 10 deletions(-) diff --git a/bridgesync/processor_test.go b/bridgesync/processor_test.go index 8fdd1f373..3843b1389 100644 --- a/bridgesync/processor_test.go +++ b/bridgesync/processor_test.go @@ -1027,10 +1027,6 @@ func TestGetBridgesPaged(t *testing.T) { } require.NoError(t, tx.Commit()) - depositCountPtr := func(i uint64) *uint64 { - return &i - } - testCases := []struct { name string pageSize uint32 @@ -1085,7 +1081,7 @@ func TestGetBridgesPaged(t *testing.T) { name: "t4", pageSize: 1, page: 1, - depositCount: depositCountPtr(1), + depositCount: uint64Ptr(1), expectedCount: 1, expectedBridges: []*Bridge{bridges[1]}, expectedError: "", @@ -1094,7 +1090,7 @@ func TestGetBridgesPaged(t *testing.T) { name: "t5", pageSize: 3, page: 2, - depositCount: depositCountPtr(1), + depositCount: uint64Ptr(1), expectedCount: 0, expectedBridges: []*Bridge{}, expectedError: "invalid page number for given page size and total number of bridges", @@ -1112,7 +1108,7 @@ func TestGetBridgesPaged(t *testing.T) { name: "t7", pageSize: 1, page: 1, - depositCount: depositCountPtr(0), + depositCount: uint64Ptr(0), expectedCount: 1, expectedBridges: []*Bridge{bridges[0]}, expectedError: "", @@ -1140,7 +1136,7 @@ func TestGetBridgesPaged(t *testing.T) { name: "t9", pageSize: 6, page: 1, - depositCount: depositCountPtr(3), + depositCount: uint64Ptr(3), networkIDs: []uint32{ bridges[0].DestinationNetwork, bridges[2].DestinationNetwork, @@ -1155,7 +1151,7 @@ func TestGetBridgesPaged(t *testing.T) { pageSize: 1, page: 1, fromAddress: "0xE34aaF64b29273B7D567FCFc40544c014EEe9970", - depositCount: depositCountPtr(0), + 
depositCount: uint64Ptr(0), expectedCount: 1, expectedBridges: []*Bridge{bridges[0]}, expectedError: "", @@ -1165,7 +1161,7 @@ func TestGetBridgesPaged(t *testing.T) { pageSize: 1, page: 1, fromAddress: "0xe34aaF64b29273B7D567FCFc40544c014EEe9970", - depositCount: depositCountPtr(0), + depositCount: uint64Ptr(0), expectedCount: 1, expectedBridges: []*Bridge{bridges[0]}, expectedError: "", @@ -2750,6 +2746,10 @@ func intPtr(i int) *int { return &i } +func uint64Ptr(i uint64) *uint64 { + return &i +} + func TestProcessor_ErrorPathLogging(t *testing.T) { t.Parallel() @@ -5347,3 +5347,219 @@ func TestClaimColumnsSQL_ReflectionCheck(t *testing.T) { require.True(t, ok, "Missing SQL column for meddler-tag '%s'", col) } } + +func TestProcessor_BackwardLET(t *testing.T) { + buildBlocksWithSequentialBridges := func(blocksCount, bridgesPerBlock uint64) []sync.Block { + blocks := make([]sync.Block, 0, blocksCount) + depositCount := uint32(0) + for i := range blocksCount { + blockNum := i + 1 + block := sync.Block{ + Num: blockNum, + Hash: common.HexToHash(fmt.Sprintf("%x", blockNum)), + } + for blockPos := range bridgesPerBlock { + block.Events = append(block.Events, + Event{Bridge: &Bridge{ + BlockNum: blockNum, + BlockPos: blockPos, + DepositCount: depositCount, + }}) + + depositCount++ + } + + blocks = append(blocks, block) + } + return blocks + } + + collectExpectedBridgesUpTo := func(t *testing.T, blocks []sync.Block, targetDepositCount uint32) []Bridge { + t.Helper() + + var bridges []Bridge + for _, b := range blocks { + for _, e := range b.Events { + evt, ok := e.(Event) + require.True(t, ok) + if evt.Bridge != nil { + bridges = append(bridges, *evt.Bridge) + if evt.Bridge.DepositCount == targetDepositCount { + return bridges + } + } + } + } + return bridges + } + + testCases := []struct { + name string + setupBlocks func() []sync.Block + firstReorgedBlock *uint64 + targetDepositCount uint32 + restoredBridgeDepositCounts []uint32 + processBlockErrMsg string + }{ + { + 
name: "backward let after a couple of bridges", + setupBlocks: func() []sync.Block { + blocks := buildBlocksWithSequentialBridges(3, 2) + blocks = append(blocks, sync.Block{ + Num: uint64(len(blocks) + 1), + Hash: common.HexToHash(fmt.Sprintf("0x%x", len(blocks)+1)), + Events: []any{ + Event{BackwardLET: &BackwardLET{ + BlockNum: uint64(len(blocks) + 1), + BlockPos: 0, + PreviousDepositCount: big.NewInt(3), + NewDepositCount: big.NewInt(2), + }}, + }, + }) + + return blocks + }, + targetDepositCount: 2, + }, + { + name: "backward let event with all the bridges", + setupBlocks: func() []sync.Block { + blocks := buildBlocksWithSequentialBridges(3, 2) + blocks = append(blocks, sync.Block{ + Num: uint64(len(blocks) + 1), + Hash: common.HexToHash(fmt.Sprintf("0x%x", len(blocks)+1)), + Events: []any{ + Event{BackwardLET: &BackwardLET{ + BlockNum: uint64(len(blocks) + 1), + BlockPos: 0, + PreviousDepositCount: big.NewInt(6), + NewDepositCount: big.NewInt(0), + }}, + }, + }) + + return blocks + }, + targetDepositCount: 0, + }, + { + name: "backward let event (only the last bridge)", + setupBlocks: func() []sync.Block { + blocks := buildBlocksWithSequentialBridges(3, 2) + backwardLETBlock := sync.Block{ + Num: uint64(len(blocks) + 1), + Hash: common.HexToHash(fmt.Sprintf("0x%x", len(blocks)+1)), + Events: []any{ + Event{BackwardLET: &BackwardLET{ + BlockNum: uint64(len(blocks) + 1), + BlockPos: 0, + PreviousDepositCount: big.NewInt(6), + NewDepositCount: big.NewInt(5), + }}, + }, + } + blocks = append(blocks, backwardLETBlock) + + return blocks + }, + targetDepositCount: 5, + }, + { + name: "overlapping backward let events", + setupBlocks: func() []sync.Block { + blocks := buildBlocksWithSequentialBridges(3, 2) + blocks = append(blocks, sync.Block{ + Num: uint64(len(blocks) + 1), + Hash: common.HexToHash(fmt.Sprintf("0x%x", len(blocks)+1)), + Events: []any{ + Event{BackwardLET: &BackwardLET{ + BlockNum: uint64(len(blocks) + 1), + BlockPos: 0, + PreviousDepositCount: 
big.NewInt(6), + NewDepositCount: big.NewInt(3), + }}, + }, + }) + blocks = append(blocks, sync.Block{ + Num: uint64(len(blocks) + 2), + Hash: common.HexToHash(fmt.Sprintf("0x%x", len(blocks)+2)), + Events: []any{ + Event{BackwardLET: &BackwardLET{ + BlockNum: uint64(len(blocks) + 2), + BlockPos: 0, + PreviousDepositCount: big.NewInt(4), + NewDepositCount: big.NewInt(3), + }}, + }, + }) + + return blocks + }, + targetDepositCount: 3, + }, + { + name: "backward let after a couple of bridges + reorg backward let", + setupBlocks: func() []sync.Block { + blocks := buildBlocksWithSequentialBridges(3, 2) + backwardLETBlock := sync.Block{ + Num: uint64(len(blocks) + 1), + Hash: common.HexToHash(fmt.Sprintf("0x%x", len(blocks)+1)), + Events: []any{ + Event{BackwardLET: &BackwardLET{ + BlockNum: 4, + BlockPos: 0, + PreviousDepositCount: big.NewInt(6), + NewDepositCount: big.NewInt(2), + }}, + }, + } + blocks = append(blocks, backwardLETBlock) + + return blocks + }, + firstReorgedBlock: uint64Ptr(3), + targetDepositCount: 3, + restoredBridgeDepositCounts: []uint32{3}, + }, + } + + for _, c := range testCases { + t.Run(c.name, func(t *testing.T) { + dbPath := filepath.Join(t.TempDir(), "backward_let_cases.sqlite") + require.NoError(t, migrations.RunMigrations(dbPath)) + p, err := newProcessor(dbPath, "bridge-syncer", log.GetDefaultLogger(), dbQueryTimeout) + require.NoError(t, err) + + blocks := c.setupBlocks() + for _, b := range blocks { + err = p.ProcessBlock(t.Context(), b) + if c.processBlockErrMsg != "" { + require.ErrorContains(t, err, c.processBlockErrMsg) + } else { + require.NoError(t, err) + } + } + + if c.firstReorgedBlock != nil { + err = p.Reorg(t.Context(), *c.firstReorgedBlock) + require.NoError(t, err) + } + + lastProcessedBlock, err := p.GetLastProcessedBlock(t.Context()) + require.NoError(t, err) + expectedBridges := collectExpectedBridgesUpTo(t, blocks, c.targetDepositCount) + for i := range expectedBridges { + for _, restored := range 
c.restoredBridgeDepositCounts { + if expectedBridges[i].DepositCount == restored { + expectedBridges[i].Source = BridgeSourceBackwardLET + } + } + } + + actualBridges, err := p.GetBridges(t.Context(), 0, lastProcessedBlock) + require.NoError(t, err) + require.Equal(t, expectedBridges, actualBridges) + }) + } +} From 2e39f393d628a7f88ec6e6fc0d1a7d84ad6d80d4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Stefan=20Negovanovi=C4=87?= Date: Tue, 16 Dec 2025 15:54:41 +0100 Subject: [PATCH 27/73] increase code coverage in common pkg --- common/common_test.go | 109 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 109 insertions(+) diff --git a/common/common_test.go b/common/common_test.go index fdf9ba65d..520526d0a 100644 --- a/common/common_test.go +++ b/common/common_test.go @@ -512,3 +512,112 @@ func TestParseUint64HexOrDecimal(t *testing.T) { }) } } + +func TestSafeUint64(t *testing.T) { + tests := []struct { + name string + input *big.Int + want uint64 + expectErr bool + }{ + { + name: "nil value", + input: nil, + expectErr: true, + }, + { + name: "zero", + input: big.NewInt(0), + want: 0, + expectErr: false, + }, + { + name: "small positive number", + input: big.NewInt(42), + want: 42, + expectErr: false, + }, + { + name: "max uint64", + input: new(big.Int).SetUint64(math.MaxUint64), + want: math.MaxUint64, + expectErr: false, + }, + { + name: "negative value", + input: big.NewInt(-1), + expectErr: true, + }, + { + name: "overflow uint64", + input: new(big.Int).Add(new(big.Int).SetUint64(math.MaxUint64), big.NewInt(1)), + expectErr: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := SafeUint64(tt.input) + + if tt.expectErr { + require.Error(t, err) + return + } + + require.NoError(t, err) + require.Equal(t, tt.want, got) + }) + } +} + +func TestSafeUint32(t *testing.T) { + tests := []struct { + name string + input uint64 + want uint32 + expectErr bool + }{ + { + name: "zero", + input: 0, + want: 0, + expectErr: 
false, + }, + { + name: "small value", + input: 123, + want: 123, + expectErr: false, + }, + { + name: "max uint32", + input: math.MaxUint32, + want: math.MaxUint32, + expectErr: false, + }, + { + name: "just above max uint32", + input: uint64(math.MaxUint32) + 1, + expectErr: true, + }, + { + name: "max uint64", + input: math.MaxUint64, + expectErr: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := SafeUint32(tt.input) + + if tt.expectErr { + require.Error(t, err) + return + } + + require.NoError(t, err) + require.Equal(t, tt.want, got) + }) + } +} From 72fa4196dfc565bc0db5917fd4c81195e21ded0b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Stefan=20Negovanovi=C4=87?= Date: Tue, 16 Dec 2025 16:01:48 +0100 Subject: [PATCH 28/73] TestProcessor_BackwardLET, backward LET on empty bridge table --- bridgesync/processor.go | 11 ++++--- bridgesync/processor_test.go | 59 +++++++++++++++++++++++++++++++++++- 2 files changed, 64 insertions(+), 6 deletions(-) diff --git a/bridgesync/processor.go b/bridgesync/processor.go index 37774e973..5465c7bdc 100644 --- a/bridgesync/processor.go +++ b/bridgesync/processor.go @@ -1464,6 +1464,12 @@ func (p *processor) ProcessBlock(ctx context.Context, block sync.Block) error { return fmt.Errorf("failed to convert new deposit count to uint64: %w", err) } + leafIndex, err := aggkitcommon.SafeUint32(newDepositCountU64) + if err != nil { + return fmt.Errorf("failed to convert new deposit count (uint64) to leaf index (uint32): %w", + err) + } + // 1. remove all the bridges whose deposit_count is greater than the one captured by the BackwardLET event deleteBridges := fmt.Sprintf("DELETE from %s WHERE deposit_count > $1", bridgeTableName) _, err = tx.Exec(deleteBridges, newDepositCountU64) @@ -1473,11 +1479,6 @@ func (p *processor) ProcessBlock(ctx context.Context, block sync.Block) error { } // 2. 
remove all leafs from the exit tree with indices greater than leafIndex in the exit tree - leafIndex, err := aggkitcommon.SafeUint32(newDepositCountU64) - if err != nil { - return fmt.Errorf("failed to convert new deposit count (uint64) to leaf index (uint32): %w", - err) - } if err := p.exitTree.BackwardToIndex(ctx, tx, leafIndex); err != nil { p.log.Errorf("failed to backward local exit tree to leaf index %d (deposit count: %d)", leafIndex, newDepositCountU64) diff --git a/bridgesync/processor_test.go b/bridgesync/processor_test.go index 3843b1389..d9b5cd173 100644 --- a/bridgesync/processor_test.go +++ b/bridgesync/processor_test.go @@ -5377,7 +5377,7 @@ func TestProcessor_BackwardLET(t *testing.T) { collectExpectedBridgesUpTo := func(t *testing.T, blocks []sync.Block, targetDepositCount uint32) []Bridge { t.Helper() - var bridges []Bridge + bridges := make([]Bridge, 0) for _, b := range blocks { for _, e := range b.Events { evt, ok := e.(Event) @@ -5498,6 +5498,63 @@ func TestProcessor_BackwardLET(t *testing.T) { }, targetDepositCount: 3, }, + { + name: "backward let on empty bridge table", + setupBlocks: func() []sync.Block { + return []sync.Block{ + { + Num: 1, + Hash: common.HexToHash("0x1"), + Events: []any{ + Event{BackwardLET: &BackwardLET{ + BlockNum: 1, + BlockPos: 0, + PreviousDepositCount: big.NewInt(6), + NewDepositCount: big.NewInt(3), + }}, + }, + }} + }, + targetDepositCount: 0, + }, + { + name: "backward let invalid new deposit count (outside of uint64 range)", + setupBlocks: func() []sync.Block { + return []sync.Block{ + { + Num: 1, + Hash: common.HexToHash("0x1"), + Events: []any{ + Event{BackwardLET: &BackwardLET{ + BlockNum: 1, + BlockPos: 0, + PreviousDepositCount: big.NewInt(0), + NewDepositCount: big.NewInt(-3), + }}, + }, + }} + }, + processBlockErrMsg: "failed to convert new deposit count to uint64", + }, + { + name: "backward let invalid new deposit count (outside of uint32 range)", + setupBlocks: func() []sync.Block { + return 
[]sync.Block{ + { + Num: 1, + Hash: common.HexToHash("0x1"), + Events: []any{ + Event{BackwardLET: &BackwardLET{ + BlockNum: 1, + BlockPos: 0, + PreviousDepositCount: big.NewInt(0), + NewDepositCount: big.NewInt(4294967296), + }}, + }, + }} + }, + processBlockErrMsg: "failed to convert new deposit count (uint64) to leaf index (uint32)", + }, { name: "backward let after a couple of bridges + reorg backward let", setupBlocks: func() []sync.Block { From a6ac3d807b7ffbe7b10d476f8424a51f0e47b82e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Stefan=20Negovanovi=C4=87?= Date: Tue, 16 Dec 2025 16:15:00 +0100 Subject: [PATCH 29/73] add source column to bridge_archive table and copy the value on deletion trigger --- bridgesync/migrations/bridgesync0012.sql | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/bridgesync/migrations/bridgesync0012.sql b/bridgesync/migrations/bridgesync0012.sql index 1b8735ad6..376424202 100644 --- a/bridgesync/migrations/bridgesync0012.sql +++ b/bridgesync/migrations/bridgesync0012.sql @@ -32,7 +32,8 @@ CREATE TABLE IF NOT EXISTS bridge_archive ( tx_hash VARCHAR, block_timestamp INTEGER, txn_sender VARCHAR, - from_address VARCHAR + from_address VARCHAR, + source TEXT ); ------------------------------------------------------------------------------ @@ -56,7 +57,8 @@ BEGIN tx_hash, block_timestamp, txn_sender, - from_address + from_address, + source ) VALUES ( OLD.deposit_count, @@ -72,7 +74,8 @@ BEGIN OLD.tx_hash, OLD.block_timestamp, OLD.txn_sender, - OLD.from_address + OLD.from_address, + OLD.source ); END; From 39c37b318ddb5625881459e1de20c69c34e345f0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Stefan=20Negovanovi=C4=87?= Date: Wed, 17 Dec 2025 08:07:52 +0100 Subject: [PATCH 30/73] log the deleted bridges by deposit counts --- bridgesync/processor.go | 22 ++++++++++++++++++---- 1 file changed, 18 insertions(+), 4 deletions(-) diff --git a/bridgesync/processor.go b/bridgesync/processor.go index 5465c7bdc..0f336bbdb 100644 --- 
a/bridgesync/processor.go +++ b/bridgesync/processor.go @@ -1471,11 +1471,25 @@ func (p *processor) ProcessBlock(ctx context.Context, block sync.Block) error { } // 1. remove all the bridges whose deposit_count is greater than the one captured by the BackwardLET event - deleteBridges := fmt.Sprintf("DELETE from %s WHERE deposit_count > $1", bridgeTableName) - _, err = tx.Exec(deleteBridges, newDepositCountU64) + deleteBridgesSQL := fmt.Sprintf("DELETE from %s WHERE deposit_count > $1 RETURNING deposit_count", bridgeTableName) + rows, err := tx.Query(deleteBridgesSQL, newDepositCountU64) if err != nil { - p.log.Errorf("failed to remove bridges whose deposit count is greater than %d", newDepositCountU64) - return err + return fmt.Errorf("failed to delete bridges: %w", err) + } + defer rows.Close() + + var deleted []uint32 + for rows.Next() { + var depositCount uint32 + if err := rows.Scan(&depositCount); err != nil { + return err + } + deleted = append(deleted, depositCount) + } + + if len(deleted) > 0 { + p.log.Debugf("deleted bridges with deposit_count > %d due to BackwardLET: %v", + newDepositCountU64, deleted) } // 2. 
remove all leafs from the exit tree with indices greater than leafIndex in the exit tree From 4b8235ff144172b8d861a3ade5ee80bfe32aad92 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Stefan=20Negovanovi=C4=87?= Date: Wed, 17 Dec 2025 08:08:10 +0100 Subject: [PATCH 31/73] backward let in between of bridges --- bridgesync/processor_test.go | 51 +++++++++++++++++++++++++++++------- 1 file changed, 41 insertions(+), 10 deletions(-) diff --git a/bridgesync/processor_test.go b/bridgesync/processor_test.go index d9b5cd173..200b35c14 100644 --- a/bridgesync/processor_test.go +++ b/bridgesync/processor_test.go @@ -5349,11 +5349,12 @@ func TestClaimColumnsSQL_ReflectionCheck(t *testing.T) { } func TestProcessor_BackwardLET(t *testing.T) { - buildBlocksWithSequentialBridges := func(blocksCount, bridgesPerBlock uint64) []sync.Block { + buildBlocksWithSequentialBridges := func(blocksCount, bridgesPerBlock uint64, + blockNumOffset uint64, depositCountOffset uint32) []sync.Block { blocks := make([]sync.Block, 0, blocksCount) - depositCount := uint32(0) + depositCount := depositCountOffset for i := range blocksCount { - blockNum := i + 1 + blockNum := i + 1 + blockNumOffset block := sync.Block{ Num: blockNum, Hash: common.HexToHash(fmt.Sprintf("%x", blockNum)), @@ -5374,11 +5375,16 @@ func TestProcessor_BackwardLET(t *testing.T) { return blocks } - collectExpectedBridgesUpTo := func(t *testing.T, blocks []sync.Block, targetDepositCount uint32) []Bridge { + collectExpectedBridgesUpTo := func(t *testing.T, blocks []sync.Block, + skipBlocks []uint64, targetDepositCount uint32) []Bridge { t.Helper() bridges := make([]Bridge, 0) for _, b := range blocks { + if slices.Contains(skipBlocks, b.Num) { + continue + } + for _, e := range b.Events { evt, ok := e.(Event) require.True(t, ok) @@ -5398,13 +5404,14 @@ func TestProcessor_BackwardLET(t *testing.T) { setupBlocks func() []sync.Block firstReorgedBlock *uint64 targetDepositCount uint32 + skipBlocks []uint64 restoredBridgeDepositCounts []uint32 
processBlockErrMsg string }{ { name: "backward let after a couple of bridges", setupBlocks: func() []sync.Block { - blocks := buildBlocksWithSequentialBridges(3, 2) + blocks := buildBlocksWithSequentialBridges(3, 2, 0, 0) blocks = append(blocks, sync.Block{ Num: uint64(len(blocks) + 1), Hash: common.HexToHash(fmt.Sprintf("0x%x", len(blocks)+1)), @@ -5425,7 +5432,7 @@ func TestProcessor_BackwardLET(t *testing.T) { { name: "backward let event with all the bridges", setupBlocks: func() []sync.Block { - blocks := buildBlocksWithSequentialBridges(3, 2) + blocks := buildBlocksWithSequentialBridges(3, 2, 0, 0) blocks = append(blocks, sync.Block{ Num: uint64(len(blocks) + 1), Hash: common.HexToHash(fmt.Sprintf("0x%x", len(blocks)+1)), @@ -5446,7 +5453,7 @@ func TestProcessor_BackwardLET(t *testing.T) { { name: "backward let event (only the last bridge)", setupBlocks: func() []sync.Block { - blocks := buildBlocksWithSequentialBridges(3, 2) + blocks := buildBlocksWithSequentialBridges(3, 2, 0, 0) backwardLETBlock := sync.Block{ Num: uint64(len(blocks) + 1), Hash: common.HexToHash(fmt.Sprintf("0x%x", len(blocks)+1)), @@ -5465,10 +5472,34 @@ func TestProcessor_BackwardLET(t *testing.T) { }, targetDepositCount: 5, }, + { + name: "backward let event in the middle of bridges", + setupBlocks: func() []sync.Block { + blocks := buildBlocksWithSequentialBridges(2, 3, 0, 0) + backwardLETBlock := sync.Block{ + Num: uint64(len(blocks) + 1), + Hash: common.HexToHash(fmt.Sprintf("0x%x", len(blocks)+1)), + Events: []any{ + Event{BackwardLET: &BackwardLET{ + BlockNum: uint64(len(blocks) + 1), + BlockPos: 0, + PreviousDepositCount: big.NewInt(5), + NewDepositCount: big.NewInt(2), + }}, + }, + } + blocks = append(blocks, backwardLETBlock) + blocks = append(blocks, buildBlocksWithSequentialBridges(3, 2, uint64(len(blocks)), 3)...) 
+ + return blocks + }, + targetDepositCount: 8, + skipBlocks: []uint64{2, 3}, // all the bridges from these blocks were backwarded + }, { name: "overlapping backward let events", setupBlocks: func() []sync.Block { - blocks := buildBlocksWithSequentialBridges(3, 2) + blocks := buildBlocksWithSequentialBridges(3, 2, 0, 0) blocks = append(blocks, sync.Block{ Num: uint64(len(blocks) + 1), Hash: common.HexToHash(fmt.Sprintf("0x%x", len(blocks)+1)), @@ -5558,7 +5589,7 @@ func TestProcessor_BackwardLET(t *testing.T) { { name: "backward let after a couple of bridges + reorg backward let", setupBlocks: func() []sync.Block { - blocks := buildBlocksWithSequentialBridges(3, 2) + blocks := buildBlocksWithSequentialBridges(3, 2, 0, 0) backwardLETBlock := sync.Block{ Num: uint64(len(blocks) + 1), Hash: common.HexToHash(fmt.Sprintf("0x%x", len(blocks)+1)), @@ -5605,7 +5636,7 @@ func TestProcessor_BackwardLET(t *testing.T) { lastProcessedBlock, err := p.GetLastProcessedBlock(t.Context()) require.NoError(t, err) - expectedBridges := collectExpectedBridgesUpTo(t, blocks, c.targetDepositCount) + expectedBridges := collectExpectedBridgesUpTo(t, blocks, c.skipBlocks, c.targetDepositCount) for i := range expectedBridges { for _, restored := range c.restoredBridgeDepositCounts { if expectedBridges[i].DepositCount == restored { From e8dc5e02dff0ca7e0047786536bd1901690c83ce Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Stefan=20Negovanovi=C4=87?= Date: Wed, 17 Dec 2025 14:49:50 +0100 Subject: [PATCH 32/73] fix: lint --- bridgesync/migrations/migrations.go | 3 --- 1 file changed, 3 deletions(-) diff --git a/bridgesync/migrations/migrations.go b/bridgesync/migrations/migrations.go index 07eb69164..d718e93e6 100644 --- a/bridgesync/migrations/migrations.go +++ b/bridgesync/migrations/migrations.go @@ -45,9 +45,6 @@ func init() { }) } -//go:embed bridgesync0012.sql -var mig0012 string - func RunMigrations(dbPath string) error { // Pre-calculate total length total := len(migrations) + 
len(treemigrations.Migrations) From d24339d79cd173ffb90bf566dbb730d3cc95d076 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Stefan=20Negovanovi=C4=87?= Date: Wed, 17 Dec 2025 15:06:02 +0100 Subject: [PATCH 33/73] fix unit tests --- bridgesync/migrations/bridgesync0012.sql | 15 +++++++++------ bridgesync/processor.go | 4 ++-- 2 files changed, 11 insertions(+), 8 deletions(-) diff --git a/bridgesync/migrations/bridgesync0012.sql b/bridgesync/migrations/bridgesync0012.sql index 1ca911fd5..693d8e71f 100644 --- a/bridgesync/migrations/bridgesync0012.sql +++ b/bridgesync/migrations/bridgesync0012.sql @@ -16,6 +16,9 @@ CREATE TABLE IF NOT EXISTS backward_let ( PRIMARY KEY (block_num, block_pos) ); +ALTER TABLE bridge ADD COLUMN source TEXT; +ALTER TABLE bridge ADD COLUMN to_address VARCHAR; + ------------------------------------------------------------------------------ -- Create bridge_archive table ------------------------------------------------------------------------------ @@ -34,7 +37,8 @@ CREATE TABLE IF NOT EXISTS bridge_archive ( block_timestamp INTEGER, txn_sender VARCHAR, from_address VARCHAR, - source TEXT + source TEXT, + to_address VARCHAR ); ------------------------------------------------------------------------------ @@ -59,7 +63,8 @@ BEGIN block_timestamp, txn_sender, from_address, - source + source, + to_address ) VALUES ( OLD.deposit_count, @@ -76,9 +81,7 @@ BEGIN OLD.block_timestamp, OLD.txn_sender, OLD.from_address, - OLD.source + OLD.source, + OLD.to_address ); END; - -ALTER TABLE bridge ADD COLUMN source TEXT; -ALTER TABLE bridge ADD COLUMN to_address VARCHAR; diff --git a/bridgesync/processor.go b/bridgesync/processor.go index 8e98f7023..e119e87de 100644 --- a/bridgesync/processor.go +++ b/bridgesync/processor.go @@ -1270,12 +1270,12 @@ func (p *processor) Reorg(ctx context.Context, firstReorgedBlock uint64) error { INSERT INTO bridge ( block_num, block_pos, leaf_type, origin_network, origin_address, destination_network, destination_address, amount, 
metadata, - tx_hash, block_timestamp, txn_sender, deposit_count, from_address, source + tx_hash, block_timestamp, txn_sender, deposit_count, from_address, source, to_address ) SELECT block_num, block_pos, leaf_type, origin_network, origin_address, destination_network, destination_address, amount, metadata, - tx_hash, block_timestamp, txn_sender, deposit_count, from_address, $1 + tx_hash, block_timestamp, txn_sender, deposit_count, from_address, $1, to_address FROM bridge_archive WHERE deposit_count > $2 AND deposit_count <= $3 ORDER BY deposit_count ASC; From b09316909412dada469ead2966f05c8949fe6df3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Stefan=20Negovanovi=C4=87?= Date: Wed, 17 Dec 2025 15:58:50 +0100 Subject: [PATCH 34/73] refactor processing of backward let event --- bridgesync/processor.go | 88 +++++++++++++++++++++++++----------- bridgesync/processor_test.go | 4 +- 2 files changed, 63 insertions(+), 29 deletions(-) diff --git a/bridgesync/processor.go b/bridgesync/processor.go index e119e87de..d18295a2f 100644 --- a/bridgesync/processor.go +++ b/bridgesync/processor.go @@ -1306,7 +1306,12 @@ func (p *processor) Reorg(ctx context.Context, firstReorgedBlock uint64) error { newDepositCount, prevDepositCount, err) } + restoredDepositCounts := make([]uint32, 0, len(restoredBridges)) for _, restoredBridge := range restoredBridges { + if p.log.IsEnabledLogLevel(zapcore.DebugLevel) { + restoredDepositCounts = append(restoredDepositCounts, restoredBridge.DepositCount) + } + if _, err = p.exitTree.PutLeaf(tx, restoredBridge.BlockNum, restoredBridge.BlockPos, types.Leaf{ Index: restoredBridge.DepositCount, @@ -1319,6 +1324,8 @@ func (p *processor) Reorg(ctx context.Context, firstReorgedBlock uint64) error { } } + p.log.Debugf("restored bridges with deposit counts: %v", restoredDepositCounts) + // --------------------------------------------------------------------- // 4. 
Remove restored bridges from the bridge_archive table // --------------------------------------------------------------------- @@ -1460,43 +1467,22 @@ func (p *processor) ProcessBlock(ctx context.Context, block sync.Block) error { } if event.BackwardLET != nil { - newDepositCountU64, err := aggkitcommon.SafeUint64(event.BackwardLET.NewDepositCount) + newDepositCount, leafIndex, err := normalizeDepositCount(event.BackwardLET.NewDepositCount) if err != nil { - return fmt.Errorf("failed to convert new deposit count to uint64: %w", err) - } - - leafIndex, err := aggkitcommon.SafeUint32(newDepositCountU64) - if err != nil { - return fmt.Errorf("failed to convert new deposit count (uint64) to leaf index (uint32): %w", - err) + return err } // 1. remove all the bridges whose deposit_count is greater than the one captured by the BackwardLET event - deleteBridgesSQL := fmt.Sprintf("DELETE from %s WHERE deposit_count > $1 RETURNING deposit_count", bridgeTableName) - rows, err := tx.Query(deleteBridgesSQL, newDepositCountU64) + err = p.deleteBridgesAbove(ctx, tx, newDepositCount) if err != nil { - return fmt.Errorf("failed to delete bridges: %w", err) - } - defer rows.Close() - - var deleted []uint32 - for rows.Next() { - var depositCount uint32 - if err := rows.Scan(&depositCount); err != nil { - return err - } - deleted = append(deleted, depositCount) - } - - if len(deleted) > 0 { - p.log.Debugf("deleted bridges with deposit_count > %d due to BackwardLET: %v", - newDepositCountU64, deleted) + return fmt.Errorf("failed to delete bridges above deposit count %d: %w", + newDepositCount, err) } // 2. 
remove all leafs from the exit tree with indices greater than leafIndex in the exit tree if err := p.exitTree.BackwardToIndex(ctx, tx, leafIndex); err != nil { p.log.Errorf("failed to backward local exit tree to leaf index %d (deposit count: %d)", - leafIndex, newDepositCountU64) + leafIndex, newDepositCount) return err } @@ -1534,6 +1520,54 @@ func (p *processor) ProcessBlock(ctx context.Context, block sync.Block) error { return nil } +// normalizeDepositCount checks whether given depositCount can fit into the uint64 and uint32 and downcasts it. +// Otherwise it returns an error. +func normalizeDepositCount(depositCount *big.Int) (uint64, uint32, error) { + u64, err := aggkitcommon.SafeUint64(depositCount) + if err != nil { + return 0, 0, fmt.Errorf("invalid deposit count: %w", err) + } + + u32, err := aggkitcommon.SafeUint32(u64) + if err != nil { + return 0, 0, fmt.Errorf("invalid deposit count: %w", err) + } + + return u64, u32, nil +} + +// deleteBridgesAbove removes all the bridges whose depositCount is greater than the provided one. 
+func (p *processor) deleteBridgesAbove(ctx context.Context, tx dbtypes.Txer, depositCount uint64) error { + query := fmt.Sprintf(` + DELETE FROM %s + WHERE deposit_count > $1 + RETURNING deposit_count + `, bridgeTableName) + + rows, err := tx.QueryContext(ctx, query, depositCount) + if err != nil { + return err + } + defer rows.Close() + + var deleted []uint32 + for rows.Next() { + var dc uint32 + if err := rows.Scan(&dc); err != nil { + return err + } + deleted = append(deleted, dc) + } + + if len(deleted) > 0 { + p.log.Debugf("BackwardLET removed bridges with deposit_count > %d: %v", + depositCount, deleted, + ) + } + + return nil +} + // GetTotalNumberOfRecords returns the total number of records in the given table func (p *processor) GetTotalNumberOfRecords(ctx context.Context, tableName, whereClause string) (int, error) { if !tableNameRegex.MatchString(tableName) { diff --git a/bridgesync/processor_test.go b/bridgesync/processor_test.go index 200b35c14..4a354c7ed 100644 --- a/bridgesync/processor_test.go +++ b/bridgesync/processor_test.go @@ -5565,7 +5565,7 @@ func TestProcessor_BackwardLET(t *testing.T) { }, }} }, - processBlockErrMsg: "failed to convert new deposit count to uint64", + processBlockErrMsg: "invalid deposit count: value=-3 does not fit in uint64", }, { name: "backward let invalid new deposit count (outside of uint32 range)", @@ -5584,7 +5584,7 @@ func TestProcessor_BackwardLET(t *testing.T) { }, }} }, - processBlockErrMsg: "failed to convert new deposit count (uint64) to leaf index (uint32)", + processBlockErrMsg: "invalid deposit count: value=4294967296 exceeds uint32 max", }, { name: "backward let after a couple of bridges + reorg backward let", From 3d9814f04fe43a2cb18baf62d8ae60853bd488cb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Stefan=20Negovanovi=C4=87?= Date: Thu, 18 Dec 2025 09:21:22 +0100 Subject: [PATCH 35/73] remove bridges and leafs from exit tree, and then restore only the subset of bridges that were not removed by reorg --- 
bridgesync/processor.go | 190 ++++++++++++++++++++--------------- bridgesync/processor_test.go | 31 +++++- 2 files changed, 136 insertions(+), 85 deletions(-) diff --git a/bridgesync/processor.go b/bridgesync/processor.go index d18295a2f..fa4368005 100644 --- a/bridgesync/processor.go +++ b/bridgesync/processor.go @@ -125,8 +125,8 @@ var ( type BridgeSource string const ( - BridgeSourceBackwardLET BridgeSource = "backward_let" - BridgeSourceForwardLET BridgeSource = "forward_let" + BridgeSourceRestoredBackwardLET BridgeSource = "restored_backward_let" + BridgeSourceForwardLET BridgeSource = "forward_let" ) // Bridge is the representation of a bridge event @@ -1249,8 +1249,14 @@ func (p *processor) Reorg(ctx context.Context, firstReorgedBlock uint64) error { }() // --------------------------------------------------------------------- - // 1. Load affected BackwardLETs BEFORE deleting blocks, bridges and BackwardLET entries + // 1. Load affected deposit counts and BackwardLETs BEFORE deleting blocks, bridges and BackwardLET entries // --------------------------------------------------------------------- + depositCountsToRemove, err := loadReorgedDepositCounts(tx, firstReorgedBlock) + if err != nil { + p.log.Errorf("failed to retrieve reorged bridges: %v", err) + return err + } + backwardLETsQuery := ` SELECT previous_deposit_count, new_deposit_count FROM backward_let @@ -1260,84 +1266,10 @@ func (p *processor) Reorg(ctx context.Context, firstReorgedBlock uint64) error { return fmt.Errorf("failed to retrieve the affected backward LETs: %w", err) } - // --------------------------------------------------------------------- - // 2. 
Restore bridge rows from archive for each interval - // --------------------------------------------------------------------- - restoredBridgesQuery := `SELECT * FROM bridge - WHERE deposit_count > $1 AND deposit_count <= $2 - ORDER BY deposit_count ASC;` - bridgeRestorationSQL := ` - INSERT INTO bridge ( - block_num, block_pos, leaf_type, origin_network, origin_address, - destination_network, destination_address, amount, metadata, - tx_hash, block_timestamp, txn_sender, deposit_count, from_address, source, to_address - ) - SELECT - block_num, block_pos, leaf_type, origin_network, origin_address, - destination_network, destination_address, amount, metadata, - tx_hash, block_timestamp, txn_sender, deposit_count, from_address, $1, to_address - FROM bridge_archive - WHERE deposit_count > $2 AND deposit_count <= $3 - ORDER BY deposit_count ASC; - ` - for _, backwardLET := range backwardLETs { - prevDepositCount, err := aggkitcommon.SafeUint64(backwardLET.PreviousDepositCount) - if err != nil { - return fmt.Errorf("invalid previous deposit count %s: %w", backwardLET.PreviousDepositCount, err) - } - - newDepositCount, err := aggkitcommon.SafeUint64(backwardLET.NewDepositCount) - if err != nil { - return fmt.Errorf("invalid new deposit count %s: %w", backwardLET.NewDepositCount, err) - } - - if _, err := tx.Exec(bridgeRestorationSQL, BridgeSourceBackwardLET, newDepositCount, prevDepositCount); err != nil { - return fmt.Errorf("failed to restore bridges from bridge archive (deposit counts range: %d..%d): %w", - newDepositCount, prevDepositCount, err) - } - - // --------------------------------------------------------------------- - // 3. 
Restore bridges in the exit tree - // --------------------------------------------------------------------- - var restoredBridges []*Bridge - err = meddler.QueryAll(tx, &restoredBridges, restoredBridgesQuery, newDepositCount, prevDepositCount) - if err != nil { - return fmt.Errorf("failed to retrieve the restored bridges (deposit counts range: %d..%d): %w", - newDepositCount, prevDepositCount, err) - } - - restoredDepositCounts := make([]uint32, 0, len(restoredBridges)) - for _, restoredBridge := range restoredBridges { - if p.log.IsEnabledLogLevel(zapcore.DebugLevel) { - restoredDepositCounts = append(restoredDepositCounts, restoredBridge.DepositCount) - } - - if _, err = p.exitTree.PutLeaf(tx, restoredBridge.BlockNum, restoredBridge.BlockPos, - types.Leaf{ - Index: restoredBridge.DepositCount, - Hash: restoredBridge.Hash(), - }); err != nil { - if errors.Is(err, tree.ErrInvalidIndex) { - p.halt(fmt.Sprintf("error adding leaf to the exit tree: %v", err)) - } - return sync.ErrInconsistentState - } - } - - p.log.Debugf("restored bridges with deposit counts: %v", restoredDepositCounts) - - // --------------------------------------------------------------------- - // 4. Remove restored bridges from the bridge_archive table - // --------------------------------------------------------------------- - _, err = tx.Exec(`DELETE FROM bridge_archive WHERE deposit_count > $1 AND deposit_count <= $2`, - newDepositCount, prevDepositCount) - if err != nil { - return fmt.Errorf("failed to delete restored rows from archive (range %d..%d): %w", - newDepositCount, prevDepositCount, err) - } - } - - blocksRes, err := tx.Exec(`DELETE FROM block WHERE num >= $1;`, firstReorgedBlock) + // --------------------------------------------------------- + // 2. 
Delete blocks (cascade delete everything else) + // --------------------------------------------------------- + blocksRes, err := tx.Exec(`DELETE FROM block WHERE num >= $1`, firstReorgedBlock) if err != nil { p.log.Errorf("failed to delete blocks during reorg: %v", err) return err @@ -1349,11 +1281,22 @@ func (p *processor) Reorg(ctx context.Context, firstReorgedBlock uint64) error { return err } - if err = p.exitTree.Reorg(tx, firstReorgedBlock); err != nil { + // --------------------------------------------------------- + // 3. Reorg exit tree to clean state + // --------------------------------------------------------- + if err := p.exitTree.Reorg(tx, firstReorgedBlock); err != nil { p.log.Errorf("failed to reorg exit tree: %v", err) return err } + // --------------------------------------------------------- + // 4. Restore bridges removed by BackwardLET + // --------------------------------------------------------- + err = p.restoreBackwardLETBridges(tx, backwardLETs, depositCountsToRemove) + if err != nil { + return err + } + if err = tx.Commit(); err != nil { p.log.Errorf("failed to commit reorg transaction: %v", err) return err @@ -1370,6 +1313,89 @@ func (p *processor) Reorg(ctx context.Context, firstReorgedBlock uint64) error { return nil } +// restoreBackwardLETBridges restores bridges that were previously removed by BackwardLET events +func (p *processor) restoreBackwardLETBridges(tx dbtypes.Txer, backwardLETs []*BackwardLET, + reorgedDepositCounts map[uint32]struct{}) error { + restoreQuery := ` + SELECT * + FROM bridge_archive + WHERE deposit_count > $1 AND deposit_count <= $2 + ORDER BY deposit_count ASC + ` + + for _, backwardLET := range backwardLETs { + prev, err := aggkitcommon.SafeUint64(backwardLET.PreviousDepositCount) + if err != nil { + return fmt.Errorf("invalid previous deposit count: %w", err) + } + + next, err := aggkitcommon.SafeUint64(backwardLET.NewDepositCount) + if err != nil { + return fmt.Errorf("invalid new deposit count: %w", 
err) + } + + var bridges []*Bridge + if err := meddler.QueryAll(tx, &bridges, restoreQuery, next, prev); err != nil { + return err + } + + for _, b := range bridges { + if _, ok := reorgedDepositCounts[b.DepositCount]; ok { + // skip cascade-deleted bridges (prevent from restoring them) + continue + } + + // tag the bridge as restored by reorged BackwardLET event + b.Source = BridgeSourceRestoredBackwardLET + if err := meddler.Insert(tx, bridgeTableName, b); err != nil { + return err + } + + leaf := types.Leaf{ + Index: b.DepositCount, + Hash: b.Hash(), + } + if _, err := p.exitTree.PutLeaf(tx, b.BlockNum, b.BlockPos, leaf); err != nil { + return err + } + } + + // cleanup bridge_archive + if _, err := tx.Exec(` + DELETE FROM bridge_archive + WHERE deposit_count > $1 AND deposit_count <= $2 + `, next, prev); err != nil { + return err + } + } + + return nil +} + +// loadReorgedDepositCounts retrieves the bridges that are going to be deleted by the reorg, +// and returns its deposit counts +func loadReorgedDepositCounts(tx dbtypes.Txer, fromBlock uint64) (map[uint32]struct{}, error) { + rows, err := tx.Query(` + SELECT deposit_count + FROM bridge_archive + WHERE block_num >= $1 + `, fromBlock) + if err != nil { + return nil, err + } + defer rows.Close() + + result := make(map[uint32]struct{}) + for rows.Next() { + var dc uint32 + if err := rows.Scan(&dc); err != nil { + return nil, err + } + result[dc] = struct{}{} + } + return result, nil +} + // ProcessBlock process the events of the block to build the exit tree // and updates the last processed block (can be called without events for that purpose) func (p *processor) ProcessBlock(ctx context.Context, block sync.Block) error { diff --git a/bridgesync/processor_test.go b/bridgesync/processor_test.go index 4a354c7ed..5551230c6 100644 --- a/bridgesync/processor_test.go +++ b/bridgesync/processor_test.go @@ -5597,7 +5597,7 @@ func TestProcessor_BackwardLET(t *testing.T) { Event{BackwardLET: &BackwardLET{ BlockNum: 4, 
BlockPos: 0, - PreviousDepositCount: big.NewInt(6), + PreviousDepositCount: big.NewInt(5), NewDepositCount: big.NewInt(2), }}, }, @@ -5610,13 +5610,38 @@ func TestProcessor_BackwardLET(t *testing.T) { targetDepositCount: 3, restoredBridgeDepositCounts: []uint32{3}, }, + { + name: "backward let event in the middle of bridges + reorg backward let", + setupBlocks: func() []sync.Block { + blocks := buildBlocksWithSequentialBridges(2, 3, 0, 0) + backwardLETBlock := sync.Block{ + Num: uint64(len(blocks) + 1), + Hash: common.HexToHash(fmt.Sprintf("0x%x", len(blocks)+1)), + Events: []any{ + Event{BackwardLET: &BackwardLET{ + BlockNum: uint64(len(blocks) + 1), + BlockPos: 0, + PreviousDepositCount: big.NewInt(5), + NewDepositCount: big.NewInt(2), + }}, + }, + } + blocks = append(blocks, backwardLETBlock) + blocks = append(blocks, buildBlocksWithSequentialBridges(3, 2, uint64(len(blocks)), 3)...) + + return blocks + }, + firstReorgedBlock: uint64Ptr(3), + targetDepositCount: 5, + restoredBridgeDepositCounts: []uint32{3, 4, 5}, + }, } for _, c := range testCases { t.Run(c.name, func(t *testing.T) { dbPath := filepath.Join(t.TempDir(), "backward_let_cases.sqlite") require.NoError(t, migrations.RunMigrations(dbPath)) - p, err := newProcessor(dbPath, "bridge-syncer", log.GetDefaultLogger(), dbQueryTimeout) + p, err := newProcessor(dbPath, "bridge-syncer", log.GetDefaultLogger(), 2*time.Minute) require.NoError(t, err) blocks := c.setupBlocks() @@ -5640,7 +5665,7 @@ func TestProcessor_BackwardLET(t *testing.T) { for i := range expectedBridges { for _, restored := range c.restoredBridgeDepositCounts { if expectedBridges[i].DepositCount == restored { - expectedBridges[i].Source = BridgeSourceBackwardLET + expectedBridges[i].Source = BridgeSourceRestoredBackwardLET } } } From 60e78ccfcb62b8cf9f64a3654ed3c4262bc69596 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Stefan=20Negovanovi=C4=87?= Date: Thu, 18 Dec 2025 11:28:20 +0100 Subject: [PATCH 36/73] address copilot's comments --- 
bridgesync/migrations/bridgesync0012.sql | 4 ++-- bridgesync/migrations/migrations.go | 2 +- bridgesync/processor_test.go | 3 ++- common/common.go | 2 +- 4 files changed, 6 insertions(+), 5 deletions(-) diff --git a/bridgesync/migrations/bridgesync0012.sql b/bridgesync/migrations/bridgesync0012.sql index 693d8e71f..20609bd7a 100644 --- a/bridgesync/migrations/bridgesync0012.sql +++ b/bridgesync/migrations/bridgesync0012.sql @@ -16,7 +16,7 @@ CREATE TABLE IF NOT EXISTS backward_let ( PRIMARY KEY (block_num, block_pos) ); -ALTER TABLE bridge ADD COLUMN source TEXT; +ALTER TABLE bridge ADD COLUMN source TEXT DEFAULT ''; ALTER TABLE bridge ADD COLUMN to_address VARCHAR; ------------------------------------------------------------------------------ @@ -37,7 +37,7 @@ CREATE TABLE IF NOT EXISTS bridge_archive ( block_timestamp INTEGER, txn_sender VARCHAR, from_address VARCHAR, - source TEXT, + source TEXT DEFAULT '', to_address VARCHAR ); diff --git a/bridgesync/migrations/migrations.go b/bridgesync/migrations/migrations.go index d718e93e6..a806e684a 100644 --- a/bridgesync/migrations/migrations.go +++ b/bridgesync/migrations/migrations.go @@ -46,7 +46,7 @@ func init() { } func RunMigrations(dbPath string) error { - // Pre-calculate total length + // Allocate slice with exact capacity to avoid reallocations when combining migrations total := len(migrations) + len(treemigrations.Migrations) combined := make([]types.Migration, 0, total) diff --git a/bridgesync/processor_test.go b/bridgesync/processor_test.go index 7e6a95b5b..a3ea43351 100644 --- a/bridgesync/processor_test.go +++ b/bridgesync/processor_test.go @@ -5,6 +5,7 @@ import ( "encoding/json" "errors" "fmt" + "math" "math/big" "os" "path" @@ -5579,7 +5580,7 @@ func TestProcessor_BackwardLET(t *testing.T) { BlockNum: 1, BlockPos: 0, PreviousDepositCount: big.NewInt(0), - NewDepositCount: big.NewInt(4294967296), + NewDepositCount: new(big.Int).SetUint64(uint64(math.MaxUint32) + 1), }}, }, }} diff --git 
a/common/common.go b/common/common.go index 9a2f0cf42..c1a45c6ea 100644 --- a/common/common.go +++ b/common/common.go @@ -205,7 +205,7 @@ func SafeUint64(i *big.Int) (uint64, error) { // Otherwise it returns an error. func SafeUint32(v uint64) (uint32, error) { if v > math.MaxUint32 { - return 0, fmt.Errorf("value=%d exceeds uint32 max (%d)", v, uint32(math.MaxUint32)) + return 0, fmt.Errorf("value=%d exceeds uint32 max (%d)", v, math.MaxUint32) } return uint32(v), nil } From 5ffbd3194960eac306695343c33d020b7a5ee154 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Stefan=20Negovanovi=C4=87?= Date: Thu, 18 Dec 2025 11:39:47 +0100 Subject: [PATCH 37/73] small optimization --- bridgesync/processor.go | 29 +++++++++++++++++------------ 1 file changed, 17 insertions(+), 12 deletions(-) diff --git a/bridgesync/processor.go b/bridgesync/processor.go index fa4368005..3832a2b10 100644 --- a/bridgesync/processor.go +++ b/bridgesync/processor.go @@ -1251,12 +1251,6 @@ func (p *processor) Reorg(ctx context.Context, firstReorgedBlock uint64) error { // --------------------------------------------------------------------- // 1. 
Load affected deposit counts and BackwardLETs BEFORE deleting blocks, bridges and BackwardLET entries // --------------------------------------------------------------------- - depositCountsToRemove, err := loadReorgedDepositCounts(tx, firstReorgedBlock) - if err != nil { - p.log.Errorf("failed to retrieve reorged bridges: %v", err) - return err - } - backwardLETsQuery := ` SELECT previous_deposit_count, new_deposit_count FROM backward_let @@ -1266,6 +1260,15 @@ func (p *processor) Reorg(ctx context.Context, firstReorgedBlock uint64) error { return fmt.Errorf("failed to retrieve the affected backward LETs: %w", err) } + var depositCountsToRemove map[uint32]struct{} + if len(backwardLETs) > 0 { + depositCountsToRemove, err = loadReorgedDepositCounts(tx, firstReorgedBlock) + if err != nil { + p.log.Errorf("failed to retrieve reorged bridges: %v", err) + return err + } + } + // --------------------------------------------------------- // 2. Delete blocks (cascade delete everything else) // --------------------------------------------------------- @@ -1315,7 +1318,7 @@ func (p *processor) Reorg(ctx context.Context, firstReorgedBlock uint64) error { // restoreBackwardLETBridges restores bridges that were previously removed by BackwardLET events func (p *processor) restoreBackwardLETBridges(tx dbtypes.Txer, backwardLETs []*BackwardLET, - reorgedDepositCounts map[uint32]struct{}) error { + removedDepositCounts map[uint32]struct{}) error { restoreQuery := ` SELECT * FROM bridge_archive @@ -1340,7 +1343,7 @@ func (p *processor) restoreBackwardLETBridges(tx dbtypes.Txer, backwardLETs []*B } for _, b := range bridges { - if _, ok := reorgedDepositCounts[b.DepositCount]; ok { + if _, ok := removedDepositCounts[b.DepositCount]; ok { // skip cascade-deleted bridges (prevent from restoring them) continue } @@ -1373,7 +1376,9 @@ func (p *processor) restoreBackwardLETBridges(tx dbtypes.Txer, backwardLETs []*B } // loadReorgedDepositCounts retrieves the bridges that are going to be 
deleted by the reorg, -// and returns its deposit counts +// and returns its deposit counts. +// The bridges are retrieved from the bridge_archive table, because in case there were BackwardLET events, +// they would have already deleted the bridges from bridge table. func loadReorgedDepositCounts(tx dbtypes.Txer, fromBlock uint64) (map[uint32]struct{}, error) { rows, err := tx.Query(` SELECT deposit_count @@ -1387,11 +1392,11 @@ func loadReorgedDepositCounts(tx dbtypes.Txer, fromBlock uint64) (map[uint32]str result := make(map[uint32]struct{}) for rows.Next() { - var dc uint32 - if err := rows.Scan(&dc); err != nil { + var depositCount uint32 + if err := rows.Scan(&depositCount); err != nil { return nil, err } - result[dc] = struct{}{} + result[depositCount] = struct{}{} } return result, nil } From a7390c20b5730cb3cdd1ac0be150ea057ad127dd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Stefan=20Negovanovi=C4=87?= Date: Fri, 19 Dec 2025 13:04:42 +0100 Subject: [PATCH 38/73] drop the bridge archive trigger and do it programmatically, tweak test --- bridgesync/migrations/bridgesync0012.sql | 46 -------------- bridgesync/processor.go | 62 +++++++++++-------- bridgesync/processor_test.go | 78 ++++++++++++++---------- 3 files changed, 84 insertions(+), 102 deletions(-) diff --git a/bridgesync/migrations/bridgesync0012.sql b/bridgesync/migrations/bridgesync0012.sql index 20609bd7a..983d24982 100644 --- a/bridgesync/migrations/bridgesync0012.sql +++ b/bridgesync/migrations/bridgesync0012.sql @@ -1,5 +1,4 @@ -- +migrate Down -DROP TRIGGER IF EXISTS archive_bridge_before_delete; DROP TABLE IF EXISTS bridge_archive; DROP TABLE IF EXISTS backward_let; ALTER TABLE bridge DROP COLUMN source; @@ -40,48 +39,3 @@ CREATE TABLE IF NOT EXISTS bridge_archive ( source TEXT DEFAULT '', to_address VARCHAR ); - ------------------------------------------------------------------------------- --- Create BEFORE DELETE trigger: archive only deleted rows 
------------------------------------------------------------------------------- -CREATE TRIGGER IF NOT EXISTS archive_bridge_before_delete -BEFORE DELETE ON bridge -FOR EACH ROW -BEGIN - INSERT OR IGNORE INTO bridge_archive ( - deposit_count, - block_num, - block_pos, - leaf_type, - origin_network, - origin_address, - destination_network, - destination_address, - amount, - metadata, - tx_hash, - block_timestamp, - txn_sender, - from_address, - source, - to_address - ) - VALUES ( - OLD.deposit_count, - OLD.block_num, - OLD.block_pos, - OLD.leaf_type, - OLD.origin_network, - OLD.origin_address, - OLD.destination_network, - OLD.destination_address, - OLD.amount, - OLD.metadata, - OLD.tx_hash, - OLD.block_timestamp, - OLD.txn_sender, - OLD.from_address, - OLD.source, - OLD.to_address - ); -END; diff --git a/bridgesync/processor.go b/bridgesync/processor.go index 3832a2b10..e517088d9 100644 --- a/bridgesync/processor.go +++ b/bridgesync/processor.go @@ -125,8 +125,8 @@ var ( type BridgeSource string const ( - BridgeSourceRestoredBackwardLET BridgeSource = "restored_backward_let" - BridgeSourceForwardLET BridgeSource = "forward_let" + BridgeSourceBackwardLET BridgeSource = "backward_let" + BridgeSourceForwardLET BridgeSource = "forward_let" ) // Bridge is the representation of a bridge event @@ -1348,8 +1348,8 @@ func (p *processor) restoreBackwardLETBridges(tx dbtypes.Txer, backwardLETs []*B continue } - // tag the bridge as restored by reorged BackwardLET event - b.Source = BridgeSourceRestoredBackwardLET + // reset source + b.Source = "" if err := meddler.Insert(tx, bridgeTableName, b); err != nil { return err } @@ -1503,8 +1503,9 @@ func (p *processor) ProcessBlock(ctx context.Context, block sync.Block) error { return err } - // 1. remove all the bridges whose deposit_count is greater than the one captured by the BackwardLET event - err = p.deleteBridgesAbove(ctx, tx, newDepositCount) + // 1. 
archive and remove all the bridges whose + // deposit_count is greater than the one captured by the BackwardLET event + err = p.archiveAndDeleteBridgesAbove(ctx, tx, newDepositCount) if err != nil { return fmt.Errorf("failed to delete bridges above deposit count %d: %w", newDepositCount, err) @@ -1567,32 +1568,43 @@ func normalizeDepositCount(depositCount *big.Int) (uint64, uint32, error) { return u64, u32, nil } -// deleteBridgesAbove removes all the bridges whose depositCount is greater than the provided one. -func (p *processor) deleteBridgesAbove(ctx context.Context, tx dbtypes.Txer, depositCount uint64) error { - query := fmt.Sprintf(` - DELETE FROM %s - WHERE deposit_count > $1 - RETURNING deposit_count - `, bridgeTableName) - - rows, err := tx.QueryContext(ctx, query, depositCount) - if err != nil { +// archiveAndDeleteBridgesAbove archives and removes all the bridges whose depositCount is greater than the provided one +func (p *processor) archiveAndDeleteBridgesAbove(ctx context.Context, tx dbtypes.Txer, depositCount uint64) error { + // 1. Load candidates + query := fmt.Sprintf(`SELECT * FROM %s WHERE deposit_count > $1`, bridgeTableName) + var bridges []*Bridge + if err := meddler.QueryAll(tx, &bridges, query, depositCount); err != nil { return err } - defer rows.Close() - var deleted []uint32 - for rows.Next() { - var dc uint32 - if err := rows.Scan(&dc); err != nil { + if len(bridges) == 0 { + return nil + } + + deletedDepositCounts := make([]uint32, 0, len(bridges)) + // 2. Archive + for _, b := range bridges { + b.Source = BridgeSourceBackwardLET + if err := meddler.Insert(tx, "bridge_archive", b); err != nil { return err } - deleted = append(deleted, dc) + deletedDepositCounts = append(deletedDepositCounts, b.DepositCount) + } + + // 3. 
Delete originals + deleteQuery := fmt.Sprintf(` + DELETE FROM %s + WHERE deposit_count > $1`, + bridgeTableName) + + _, err := tx.ExecContext(ctx, deleteQuery, depositCount) + if err != nil { + return err } - if len(deleted) > 0 { - p.log.Debugf("BackwardLET removed bridges with deposit_count > %d: %v", - depositCount, deleted, + if len(deletedDepositCounts) > 0 { + p.log.Debugf("BackwardLET archived + removed %d bridges with deposit_count > %d: %v", + len(deletedDepositCounts), depositCount, deletedDepositCounts, ) } diff --git a/bridgesync/processor_test.go b/bridgesync/processor_test.go index a3ea43351..0d8f798f5 100644 --- a/bridgesync/processor_test.go +++ b/bridgesync/processor_test.go @@ -5401,13 +5401,13 @@ func TestProcessor_BackwardLET(t *testing.T) { } testCases := []struct { - name string - setupBlocks func() []sync.Block - firstReorgedBlock *uint64 - targetDepositCount uint32 - skipBlocks []uint64 - restoredBridgeDepositCounts []uint32 - processBlockErrMsg string + name string + setupBlocks func() []sync.Block + firstReorgedBlock *uint64 + targetDepositCount uint32 + skipBlocks []uint64 + archivedDepositCounts []uint32 + processBlockErrMsg string }{ { name: "backward let after a couple of bridges", @@ -5428,10 +5428,11 @@ func TestProcessor_BackwardLET(t *testing.T) { return blocks }, - targetDepositCount: 2, + targetDepositCount: 2, + archivedDepositCounts: []uint32{3}, }, { - name: "backward let event with all the bridges", + name: "backward let event with all the bridges, except the first one", setupBlocks: func() []sync.Block { blocks := buildBlocksWithSequentialBridges(3, 2, 0, 0) blocks = append(blocks, sync.Block{ @@ -5441,7 +5442,7 @@ func TestProcessor_BackwardLET(t *testing.T) { Event{BackwardLET: &BackwardLET{ BlockNum: uint64(len(blocks) + 1), BlockPos: 0, - PreviousDepositCount: big.NewInt(6), + PreviousDepositCount: big.NewInt(5), NewDepositCount: big.NewInt(0), }}, }, @@ -5449,7 +5450,8 @@ func TestProcessor_BackwardLET(t *testing.T) { 
return blocks }, - targetDepositCount: 0, + targetDepositCount: 0, + archivedDepositCounts: []uint32{1, 2, 3, 4, 5}, }, { name: "backward let event (only the last bridge)", @@ -5462,8 +5464,8 @@ func TestProcessor_BackwardLET(t *testing.T) { Event{BackwardLET: &BackwardLET{ BlockNum: uint64(len(blocks) + 1), BlockPos: 0, - PreviousDepositCount: big.NewInt(6), - NewDepositCount: big.NewInt(5), + PreviousDepositCount: big.NewInt(5), + NewDepositCount: big.NewInt(4), }}, }, } @@ -5471,7 +5473,8 @@ func TestProcessor_BackwardLET(t *testing.T) { return blocks }, - targetDepositCount: 5, + targetDepositCount: 4, + archivedDepositCounts: []uint32{5}, }, { name: "backward let event in the middle of bridges", @@ -5494,8 +5497,9 @@ func TestProcessor_BackwardLET(t *testing.T) { return blocks }, - targetDepositCount: 8, - skipBlocks: []uint64{2, 3}, // all the bridges from these blocks were backwarded + targetDepositCount: 8, + skipBlocks: []uint64{2, 3}, // all the bridges from these blocks were backwarded + archivedDepositCounts: []uint32{3, 4, 5}, }, { name: "overlapping backward let events", @@ -5508,7 +5512,7 @@ func TestProcessor_BackwardLET(t *testing.T) { Event{BackwardLET: &BackwardLET{ BlockNum: uint64(len(blocks) + 1), BlockPos: 0, - PreviousDepositCount: big.NewInt(6), + PreviousDepositCount: big.NewInt(5), NewDepositCount: big.NewInt(3), }}, }, @@ -5528,7 +5532,8 @@ func TestProcessor_BackwardLET(t *testing.T) { return blocks }, - targetDepositCount: 3, + targetDepositCount: 3, + archivedDepositCounts: []uint32{4, 5}, }, { name: "backward let on empty bridge table", @@ -5607,9 +5612,9 @@ func TestProcessor_BackwardLET(t *testing.T) { return blocks }, - firstReorgedBlock: uint64Ptr(3), - targetDepositCount: 3, - restoredBridgeDepositCounts: []uint32{3}, + firstReorgedBlock: uint64Ptr(3), + targetDepositCount: 3, + archivedDepositCounts: []uint32{3}, }, { name: "backward let event in the middle of bridges + reorg backward let", @@ -5632,9 +5637,9 @@ func 
TestProcessor_BackwardLET(t *testing.T) { return blocks }, - firstReorgedBlock: uint64Ptr(3), - targetDepositCount: 5, - restoredBridgeDepositCounts: []uint32{3, 4, 5}, + firstReorgedBlock: uint64Ptr(3), + targetDepositCount: 5, + archivedDepositCounts: []uint32{3, 4, 5}, }, } @@ -5655,6 +5660,24 @@ func TestProcessor_BackwardLET(t *testing.T) { } } + if len(c.archivedDepositCounts) > 0 { + archivedBridgeQuery := ` + SELECT * FROM bridge_archive + WHERE deposit_count <= $1 + ORDER BY deposit_count ASC` + + maxDepositCount := slices.Max(c.archivedDepositCounts) + var archivedBridges []*Bridge + err = meddler.QueryAll(p.db, &archivedBridges, archivedBridgeQuery, maxDepositCount) + require.NoError(t, err) + + require.Len(t, archivedBridges, len(c.archivedDepositCounts)) + for i, b := range archivedBridges { + require.Equal(t, c.archivedDepositCounts[i], b.DepositCount) + require.Equal(t, BridgeSourceBackwardLET, b.Source) + } + } + if c.firstReorgedBlock != nil { err = p.Reorg(t.Context(), *c.firstReorgedBlock) require.NoError(t, err) @@ -5663,13 +5686,6 @@ func TestProcessor_BackwardLET(t *testing.T) { lastProcessedBlock, err := p.GetLastProcessedBlock(t.Context()) require.NoError(t, err) expectedBridges := collectExpectedBridgesUpTo(t, blocks, c.skipBlocks, c.targetDepositCount) - for i := range expectedBridges { - for _, restored := range c.restoredBridgeDepositCounts { - if expectedBridges[i].DepositCount == restored { - expectedBridges[i].Source = BridgeSourceRestoredBackwardLET - } - } - } actualBridges, err := p.GetBridges(t.Context(), 0, lastProcessedBlock) require.NoError(t, err) From 228956531b07a8ff756f2bbfc1bd4a391bf047b7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Stefan=20Negovanovi=C4=87?= Date: Mon, 29 Dec 2025 07:37:41 +0100 Subject: [PATCH 39/73] rename constant for unique constraint error code --- db/sqlite.go | 2 +- tree/tree.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/db/sqlite.go b/db/sqlite.go index 28038964b..1a2781a8a 
100644 --- a/db/sqlite.go +++ b/db/sqlite.go @@ -10,7 +10,7 @@ import ( ) const ( - UniqueConstrain = 1555 + UniqueConstraintErrCode = 1555 ) var ( diff --git a/tree/tree.go b/tree/tree.go index e685c3b0c..976334a13 100644 --- a/tree/tree.go +++ b/tree/tree.go @@ -135,7 +135,7 @@ func (t *Tree) getRHTNode(tx dbtypes.Querier, nodeHash common.Hash) (*types.Tree func (t *Tree) storeNodes(tx dbtypes.Txer, nodes []types.TreeNode) error { for _, node := range nodes { if err := meddler.Insert(tx, t.rhtTable, &node); err != nil { - if sqliteErr, ok := db.SQLiteErr(err); ok && sqliteErr.ExtendedCode == db.UniqueConstrain { + if sqliteErr, ok := db.SQLiteErr(err); ok && sqliteErr.ExtendedCode == db.UniqueConstraintErrCode { // ignore repeated entries continue } From 02a966d12f2d869eb161a3f1ed3a286b1afe9452 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Stefan=20Negovanovi=C4=87?= Date: Wed, 31 Dec 2025 09:13:25 +0100 Subject: [PATCH 40/73] fix migration file --- bridgesync/migrations/bridgesync0012.sql | 2 -- 1 file changed, 2 deletions(-) diff --git a/bridgesync/migrations/bridgesync0012.sql b/bridgesync/migrations/bridgesync0012.sql index 49fdc8421..3ffc19e0b 100644 --- a/bridgesync/migrations/bridgesync0012.sql +++ b/bridgesync/migrations/bridgesync0012.sql @@ -7,7 +7,6 @@ ALTER TABLE bridge DROP COLUMN to_address; DROP TABLE IF EXISTS bridge_archive; DROP TABLE IF EXISTS backward_let; ALTER TABLE bridge DROP COLUMN source; -ALTER TABLE bridge DROP COLUMN to_address; -- +migrate Up ALTER TABLE claim ADD COLUMN type TEXT NOT NULL DEFAULT ''; @@ -26,7 +25,6 @@ CREATE TABLE IF NOT EXISTS backward_let ( ); ALTER TABLE bridge ADD COLUMN source TEXT DEFAULT ''; -ALTER TABLE bridge ADD COLUMN to_address VARCHAR; ------------------------------------------------------------------------------ -- Create bridge_archive table From cfcd7ebe8eb104f5b875a3e06a62ad0dc886ac8a Mon Sep 17 00:00:00 2001 From: Goran Rojovic Date: Fri, 12 Dec 2025 07:37:08 +0100 Subject: [PATCH 41/73] feat: forward 
LET indexing --- abi/abi_builder.go | 107 +++++++++++++++++++++++++ abi/abi_builder_test.go | 107 +++++++++++++++++++++++++ abi/abi_decode.go | 61 ++++++++++++++ abi/abi_encode.go | 38 +++++++++ abi/abi_encode_test.go | 86 ++++++++++++++++++++ bridgesync/abi.go | 114 ++++++++++++++++++++++++++ bridgesync/abi_test.go | 167 +++++++++++++++++++++++++++++++++++++++ bridgesync/downloader.go | 24 ++++++ bridgesync/processor.go | 87 ++++++++++++++++++++ 9 files changed, 791 insertions(+) create mode 100644 abi/abi_builder.go create mode 100644 abi/abi_builder_test.go create mode 100644 abi/abi_decode.go create mode 100644 abi/abi_encode.go create mode 100644 abi/abi_encode_test.go create mode 100644 bridgesync/abi.go create mode 100644 bridgesync/abi_test.go diff --git a/abi/abi_builder.go b/abi/abi_builder.go new file mode 100644 index 000000000..e96ed2ced --- /dev/null +++ b/abi/abi_builder.go @@ -0,0 +1,107 @@ +package abi + +import ( + "fmt" + "math/big" + "reflect" + "strings" + + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/common" +) + +// BuildABIFields constructs ABI ArgumentMarshaling slice from a struct type using reflection +// It uses the "abiarg" tag to determine field names and optionally types +// Tag format: `abiarg:"fieldName"` or `abiarg:"fieldName,type"` +// If type is omitted, it will be inferred from the Go type +func BuildABIFields(structType any) ([]abi.ArgumentMarshaling, error) { + t := reflect.TypeOf(structType) + if t.Kind() == reflect.Pointer { + t = t.Elem() + } + if t.Kind() != reflect.Struct { + return nil, fmt.Errorf("expected struct type, got %v", t.Kind()) + } + + fields := make([]abi.ArgumentMarshaling, 0, t.NumField()) + + for i := 0; i < t.NumField(); i++ { + field := t.Field(i) + abiTag := field.Tag.Get("abiarg") + if abiTag == "" { + continue // Skip fields without abiarg tag + } + + parts := strings.Split(abiTag, ",") + name := parts[0] + + var abiType string + if len(parts) > 1 { + // Explicit 
type from tag + abiType = parts[1] + } else { + // Infer type from Go type + inferredType, err := inferABIType(field.Type) + if err != nil { + return nil, fmt.Errorf("field %s: %w", field.Name, err) + } + abiType = inferredType + } + + fields = append(fields, abi.ArgumentMarshaling{ + Name: name, + Type: abiType, + }) + } + + return fields, nil +} + +// inferABIType automatically maps Go types to Solidity ABI types +func inferABIType(goType reflect.Type) (string, error) { + // Handle special types first (before checking Kind) + switch goType { + case reflect.TypeOf(common.Address{}): + return "address", nil + case reflect.TypeOf(&big.Int{}), reflect.TypeOf(big.Int{}): + // Default to uint256 for big.Int, but can be overridden with explicit tag + return "uint256", nil + case reflect.TypeOf(common.Hash{}): + return "bytes32", nil + } + + switch goType.Kind() { + case reflect.Uint8: + return "uint8", nil + case reflect.Uint16: + return "uint16", nil + case reflect.Uint32: + return "uint32", nil + case reflect.Uint64: + return "uint64", nil + case reflect.Int8: + return "int8", nil + case reflect.Int16: + return "int16", nil + case reflect.Int32: + return "int32", nil + case reflect.Int64: + return "int64", nil + case reflect.Bool: + return "bool", nil + case reflect.String: + return "string", nil + case reflect.Slice: + if goType.Elem().Kind() == reflect.Uint8 { + return "bytes", nil + } + return "", fmt.Errorf("unsupported slice type: %v", goType) + case reflect.Array: + if goType.Elem().Kind() == reflect.Uint8 { + return fmt.Sprintf("bytes%d", goType.Len()), nil + } + return "", fmt.Errorf("unsupported array type: %v", goType) + } + + return "", fmt.Errorf("unsupported type: %v", goType) +} diff --git a/abi/abi_builder_test.go b/abi/abi_builder_test.go new file mode 100644 index 000000000..ecf920dd6 --- /dev/null +++ b/abi/abi_builder_test.go @@ -0,0 +1,107 @@ +package abi + +import ( + "math/big" + "testing" + + "github.com/ethereum/go-ethereum/accounts/abi" + 
"github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/require" +) + +func TestBuildABIFields(t *testing.T) { + type TestStruct struct { + Field1 uint8 `abiarg:"field1"` + Field2 uint32 `abiarg:"field2"` + Field3 common.Address `abiarg:"field3"` + Field4 *big.Int `abiarg:"field4,uint256"` + Field5 []byte `abiarg:"field5"` + Field6 string // No tag, should be skipped + } + + fields, err := BuildABIFields(TestStruct{}) + require.NoError(t, err) + require.Len(t, fields, 5) + + expected := []abi.ArgumentMarshaling{ + {Name: "field1", Type: "uint8"}, + {Name: "field2", Type: "uint32"}, + {Name: "field3", Type: "address"}, + {Name: "field4", Type: "uint256"}, + {Name: "field5", Type: "bytes"}, + } + + require.Equal(t, expected, fields) +} + +func TestBuildABIFields_TypeInference(t *testing.T) { + type TestStruct struct { + Uint8Field uint8 `abiarg:"uint8Field"` + Uint16Field uint16 `abiarg:"uint16Field"` + Uint32Field uint32 `abiarg:"uint32Field"` + Uint64Field uint64 `abiarg:"uint64Field"` + BoolField bool `abiarg:"boolField"` + StringField string `abiarg:"stringField"` + BytesField []byte `abiarg:"bytesField"` + AddressField common.Address `abiarg:"addressField"` + HashField common.Hash `abiarg:"hashField"` + BigIntField *big.Int `abiarg:"bigIntField"` // Inferred as uint256 + BigIntExplict *big.Int `abiarg:"bigIntExplict,uint128"` + } + + fields, err := BuildABIFields(TestStruct{}) + require.NoError(t, err) + require.Len(t, fields, 11) + + expected := []abi.ArgumentMarshaling{ + {Name: "uint8Field", Type: "uint8"}, + {Name: "uint16Field", Type: "uint16"}, + {Name: "uint32Field", Type: "uint32"}, + {Name: "uint64Field", Type: "uint64"}, + {Name: "boolField", Type: "bool"}, + {Name: "stringField", Type: "string"}, + {Name: "bytesField", Type: "bytes"}, + {Name: "addressField", Type: "address"}, + {Name: "hashField", Type: "bytes32"}, + {Name: "bigIntField", Type: "uint256"}, + {Name: "bigIntExplict", Type: "uint128"}, + } + + require.Equal(t, expected, 
fields) +} + +func TestBuildABIFields_ErrorCases(t *testing.T) { + t.Run("non-struct type", func(t *testing.T) { + _, err := BuildABIFields(42) + require.Error(t, err) + require.Contains(t, err.Error(), "expected struct type") + }) + + t.Run("unsupported field type", func(t *testing.T) { + type BadStruct struct { + InvalidField map[string]string `abiarg:"invalid"` + } + _, err := BuildABIFields(BadStruct{}) + require.Error(t, err) + require.Contains(t, err.Error(), "unsupported type") + }) +} + +func TestBuildABIFields_WithPointer(t *testing.T) { + type TestStruct struct { + Field1 uint8 `abiarg:"field1"` + Field2 uint32 `abiarg:"field2"` + } + + // Test with pointer to struct + fields, err := BuildABIFields(&TestStruct{}) + require.NoError(t, err) + require.Len(t, fields, 2) + + expected := []abi.ArgumentMarshaling{ + {Name: "field1", Type: "uint8"}, + {Name: "field2", Type: "uint32"}, + } + + require.Equal(t, expected, fields) +} diff --git a/abi/abi_decode.go b/abi/abi_decode.go new file mode 100644 index 000000000..5bf372c8e --- /dev/null +++ b/abi/abi_decode.go @@ -0,0 +1,61 @@ +package abi + +import ( + "errors" + "fmt" + "reflect" + + "github.com/ethereum/go-ethereum/accounts/abi" +) + +// DecodeABIEncodedStructArray is a generic helper that decodes ABI-encoded tuple array +// It handles the ABI unpacking and type conversion boilerplate +func DecodeABIEncodedStructArray[T any]( + encodedBytes []byte, + converter func(any) (T, error), +) ([]T, error) { + if len(encodedBytes) == 0 { + return nil, errors.New("encoded bytes are empty") + } + + var item T + abiFields, err := BuildABIFields(item) + if err != nil { + return nil, fmt.Errorf("failed to build ABI fields: %w", err) + } + + arrayType, err := abi.NewType("tuple[]", "", abiFields) + if err != nil { + return nil, fmt.Errorf("failed to create array type: %w", err) + } + + args := abi.Arguments{{Type: arrayType, Name: "data"}} + + unpacked, err := args.Unpack(encodedBytes) + if err != nil { + return nil, 
fmt.Errorf("failed to unpack data: %w", err) + } + + if len(unpacked) == 0 { + return nil, errors.New("unpacked data is empty") + } + + // The unpacked[0] contains the slice, but we need to extract it via reflection + // since the ABI library returns anonymous structs + val := reflect.ValueOf(unpacked[0]) + if val.Kind() != reflect.Slice { + return nil, fmt.Errorf("expected slice, got %v", val.Kind()) + } + + result := make([]T, val.Len()) + for i := 0; i < val.Len(); i++ { + item := val.Index(i).Interface() + converted, err := converter(item) + if err != nil { + return nil, fmt.Errorf("failed to convert item %d: %w", i, err) + } + result[i] = converted + } + + return result, nil +} diff --git a/abi/abi_encode.go b/abi/abi_encode.go new file mode 100644 index 000000000..3d8d319df --- /dev/null +++ b/abi/abi_encode.go @@ -0,0 +1,38 @@ +package abi + +import ( + "fmt" + + "github.com/ethereum/go-ethereum/accounts/abi" +) + +// EncodeABIStructArray is a generic helper that encodes a slice of structs to ABI-encoded tuple array +// It automatically builds ABI fields from the struct type using reflection and abiarg tags +func EncodeABIStructArray[T any](items []T) ([]byte, error) { + // For empty slices, we need a sample instance to build ABI fields + var item T + if len(items) > 0 { + // Use the first item to infer the type + item = items[0] + } + + // Use first item to build ABI fields + abiFields, err := BuildABIFields(item) + if err != nil { + return nil, fmt.Errorf("failed to build ABI fields: %w", err) + } + + arrayType, err := abi.NewType("tuple[]", "", abiFields) + if err != nil { + return nil, fmt.Errorf("failed to create array type: %w", err) + } + + args := abi.Arguments{{Type: arrayType, Name: "data"}} + + encodedBytes, err := args.Pack(items) + if err != nil { + return nil, fmt.Errorf("failed to pack data: %w", err) + } + + return encodedBytes, nil +} diff --git a/abi/abi_encode_test.go b/abi/abi_encode_test.go new file mode 100644 index 000000000..b3ea3537a 
--- /dev/null +++ b/abi/abi_encode_test.go @@ -0,0 +1,86 @@ +package abi + +import ( + "math/big" + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/require" +) + +func TestEncodeABIStructArray(t *testing.T) { + type TestStruct struct { + Field1 uint8 `abiarg:"field1"` + Field2 uint32 `abiarg:"field2"` + Field3 common.Address `abiarg:"field3"` + } + + items := []TestStruct{ + { + Field1: 1, + Field2: 100, + Field3: common.HexToAddress("0x1111111111111111111111111111111111111111"), + }, + { + Field1: 2, + Field2: 200, + Field3: common.HexToAddress("0x2222222222222222222222222222222222222222"), + }, + } + + encodedBytes, err := EncodeABIStructArray(items) + require.NoError(t, err) + require.NotEmpty(t, encodedBytes) + + // Decode to verify roundtrip + converter := func(item any) (TestStruct, error) { + // Simple converter for test verification + return TestStruct{}, nil + } + + _, err = DecodeABIEncodedStructArray(encodedBytes, converter) + require.NoError(t, err) +} + +func TestEncodeABIStructArray_EmptySlice(t *testing.T) { + type TestStruct struct { + Field1 uint8 `abiarg:"field1"` + Field2 uint32 `abiarg:"field2"` + } + + items := []TestStruct{} + + encodedBytes, err := EncodeABIStructArray(items) + require.NoError(t, err) + require.NotEmpty(t, encodedBytes) +} + +func TestEncodeABIStructArray_WithBigInt(t *testing.T) { + type TestStruct struct { + Amount *big.Int `abiarg:"amount,uint256"` + } + + items := []TestStruct{ + {Amount: big.NewInt(1000)}, + {Amount: big.NewInt(2000)}, + } + + encodedBytes, err := EncodeABIStructArray(items) + require.NoError(t, err) + require.NotEmpty(t, encodedBytes) +} + +func TestEncodeABIStructArray_NoTags(t *testing.T) { + type BadStruct struct { + Field1 uint8 + Field2 uint32 + } + + items := []BadStruct{ + {Field1: 1, Field2: 100}, + } + + encodedBytes, err := EncodeABIStructArray(items) + require.NoError(t, err) // Should work with empty fields (encodes empty array) + require.NotEmpty(t, 
encodedBytes) +} diff --git a/bridgesync/abi.go b/bridgesync/abi.go new file mode 100644 index 000000000..3575050d2 --- /dev/null +++ b/bridgesync/abi.go @@ -0,0 +1,114 @@ +package bridgesync + +import ( + "errors" + "fmt" + "math/big" + "reflect" + + aggkitabi "github.com/agglayer/aggkit/abi" + "github.com/ethereum/go-ethereum/common" +) + +type LeafData struct { + LeafType uint8 `abiarg:"leafType"` + OriginNetwork uint32 `abiarg:"originNetwork"` + OriginAddress common.Address `abiarg:"originAddress"` + DestinationNetwork uint32 `abiarg:"destinationNetwork"` + DestinationAddress common.Address `abiarg:"destinationAddress"` + Amount *big.Int `abiarg:"amount,uint256"` + Metadata []byte `abiarg:"metadata"` +} + +func (l LeafData) ToBridge(blockNum, blockPos, blockTimestamp uint64, depositCount uint32) Bridge { + return Bridge{ + BlockNum: blockNum, + // for all leaves from ForwardLET, BlockPos is the same as they are in the same transaction + BlockPos: blockPos, + BlockTimestamp: blockTimestamp, + DepositCount: depositCount, + LeafType: l.LeafType, + OriginNetwork: l.OriginNetwork, + OriginAddress: l.OriginAddress, + DestinationNetwork: l.DestinationNetwork, + DestinationAddress: l.DestinationAddress, + Amount: l.Amount, + Metadata: l.Metadata, + // FromAddress is always zero address, because this is a recovery mechanism bridge + // TxnSender is always zero address, because this is a recovery mechanism bridge + } +} + +// decodeForwardLETLeaves decodes the newLeaves bytes from a ForwardLET event +func decodeForwardLETLeaves(newLeavesBytes []byte) ([]LeafData, error) { + return aggkitabi.DecodeABIEncodedStructArray(newLeavesBytes, convertABILeafData) +} + +// convertABILeafData converts an anonymous struct returned by the ABI decoder +func convertABILeafData(item any) (LeafData, error) { + // Use reflection to extract fields from the anonymous struct created by ABI library + // The ABI library generates structs with JSON tags that don't match our named types + val := 
reflect.ValueOf(item) + if val.Kind() != reflect.Struct { + return LeafData{}, fmt.Errorf("expected struct, got %T", item) + } + + expectedFields := reflect.TypeOf(LeafData{}).NumField() + if val.NumField() != expectedFields { + return LeafData{}, fmt.Errorf("expected %d fields, got %d", expectedFields, val.NumField()) + } + + // Create a map of field names to values from the ABI struct + fieldMap := make(map[string]any) + valType := val.Type() + for i := 0; i < val.NumField(); i++ { + fieldName := valType.Field(i).Name + fieldMap[fieldName] = val.Field(i).Interface() + } + + // Extract fields by name with type assertions + leafType, ok := fieldMap["LeafType"].(uint8) + if !ok { + return LeafData{}, errors.New("failed to convert field 'leafType' to uint8") + } + + originNetwork, ok := fieldMap["OriginNetwork"].(uint32) + if !ok { + return LeafData{}, errors.New("failed to convert field 'originNetwork' to uint32") + } + + originAddress, ok := fieldMap["OriginAddress"].(common.Address) + if !ok { + return LeafData{}, errors.New("failed to convert field 'originAddress' to common.Address") + } + + destinationNetwork, ok := fieldMap["DestinationNetwork"].(uint32) + if !ok { + return LeafData{}, errors.New("failed to convert field 'destinationNetwork' to uint32") + } + + destinationAddress, ok := fieldMap["DestinationAddress"].(common.Address) + if !ok { + return LeafData{}, errors.New("failed to convert field 'destinationAddress' to common.Address") + } + + amount, ok := fieldMap["Amount"].(*big.Int) + if !ok { + return LeafData{}, errors.New("failed to convert field 'amount' to *big.Int") + } + + metadata, ok := fieldMap["Metadata"].([]byte) + if !ok { + return LeafData{}, errors.New("failed to convert field 'metadata' to []byte") + } + + return LeafData{ + LeafType: leafType, + OriginNetwork: originNetwork, + OriginAddress: originAddress, + DestinationNetwork: destinationNetwork, + DestinationAddress: destinationAddress, + Amount: amount, + Metadata: metadata, + }, 
nil +} diff --git a/bridgesync/abi_test.go b/bridgesync/abi_test.go new file mode 100644 index 000000000..87f85655e --- /dev/null +++ b/bridgesync/abi_test.go @@ -0,0 +1,167 @@ +package bridgesync + +import ( + "math/big" + "testing" + + aggkitabi "github.com/agglayer/aggkit/abi" + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/require" +) + +func TestDecodeForwardLETLeaves(t *testing.T) { + largeAmount := new(big.Int) + largeAmount.SetString("123456789012345678901234567890", 10) + + testCases := []struct { + name string + inputLeaves []LeafData + expectedLeaves []LeafData + errorMsg string + useRawBytes bool + rawBytes []byte + }{ + { + name: "successfully decode single leaf", + inputLeaves: []LeafData{ + { + LeafType: 1, + OriginNetwork: 5, + OriginAddress: common.HexToAddress("0x1234567890123456789012345678901234567890"), + DestinationNetwork: 10, + DestinationAddress: common.HexToAddress("0xabcdefabcdefabcdefabcdefabcdefabcdefabcd"), + Amount: big.NewInt(1000000), + Metadata: []byte("test metadata"), + }, + }, + }, + { + name: "successfully decode multiple leaves", + inputLeaves: []LeafData{ + { + LeafType: 0, + OriginNetwork: 1, + OriginAddress: common.HexToAddress("0x1111111111111111111111111111111111111111"), + DestinationNetwork: 2, + DestinationAddress: common.HexToAddress("0x2222222222222222222222222222222222222222"), + Amount: big.NewInt(100), + Metadata: []byte("first leaf"), + }, + { + LeafType: 1, + OriginNetwork: 3, + OriginAddress: common.HexToAddress("0x3333333333333333333333333333333333333333"), + DestinationNetwork: 4, + DestinationAddress: common.HexToAddress("0x4444444444444444444444444444444444444444"), + Amount: big.NewInt(200), + Metadata: []byte("second leaf"), + }, + { + LeafType: 2, + OriginNetwork: 5, + OriginAddress: common.HexToAddress("0x5555555555555555555555555555555555555555"), + DestinationNetwork: 6, + DestinationAddress: common.HexToAddress("0x6666666666666666666666666666666666666666"), + Amount: 
big.NewInt(300), + Metadata: []byte("third leaf"), + }, + }, + }, + { + name: "decode leaf with empty metadata", + inputLeaves: []LeafData{ + { + LeafType: 0, + OriginNetwork: 1, + OriginAddress: common.HexToAddress("0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"), + DestinationNetwork: 2, + DestinationAddress: common.HexToAddress("0xbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb"), + Amount: big.NewInt(999), + Metadata: []byte{}, + }, + }, + }, + { + name: "decode leaf with large amount", + inputLeaves: []LeafData{ + { + LeafType: 255, // Max uint8 + OriginNetwork: 4294967295, // Max uint32 + OriginAddress: common.HexToAddress("0xffffffffffffffffffffffffffffffffffffffff"), + DestinationNetwork: 4294967295, // Max uint32 + DestinationAddress: common.HexToAddress("0xeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee"), + Amount: largeAmount, + Metadata: []byte("large amount test"), + }, + }, + }, + { + name: "decode empty array", + inputLeaves: []LeafData{}, + }, + { + name: "fail on empty bytes", + useRawBytes: true, + rawBytes: []byte{}, + errorMsg: "encoded bytes are empty", + }, + { + name: "fail on invalid encoded data", + useRawBytes: true, + rawBytes: []byte{0x00, 0x01, 0x02, 0x03, 0x04}, + errorMsg: "failed to unpack data", + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + var encodedBytes []byte + var expectedLeaves []LeafData + + if tc.useRawBytes { + encodedBytes = tc.rawBytes + } else { + encodedBytes = encodeLeafDataArray(t, tc.inputLeaves) + expectedLeaves = tc.inputLeaves + } + + decodedLeaves, err := decodeForwardLETLeaves(encodedBytes) + + if tc.errorMsg != "" { + require.ErrorContains(t, err, tc.errorMsg) + require.Nil(t, decodedLeaves) + } else { + require.NoError(t, err) + require.Len(t, decodedLeaves, len(expectedLeaves)) + for i, expected := range expectedLeaves { + verifyLeafData(t, expected, decodedLeaves[i]) + } + } + }) + } +} + +// encodeLeafDataArray encodes a slice of LeafData using Solidity ABI encoding +// This simulates 
what the smart contract does with abi.encode(newLeaves) +func encodeLeafDataArray(t *testing.T, leaves []LeafData) []byte { + t.Helper() + + encodedBytes, err := aggkitabi.EncodeABIStructArray(leaves) + require.NoError(t, err) + + return encodedBytes +} + +// verifyLeafData compares two LeafData structs for equality +func verifyLeafData(t *testing.T, expected, actual LeafData) { + t.Helper() + + require.Equal(t, expected.LeafType, actual.LeafType, "LeafType mismatch") + require.Equal(t, expected.OriginNetwork, actual.OriginNetwork, "OriginNetwork mismatch") + require.Equal(t, expected.OriginAddress, actual.OriginAddress, "OriginAddress mismatch") + require.Equal(t, expected.DestinationNetwork, actual.DestinationNetwork, "DestinationNetwork mismatch") + require.Equal(t, expected.DestinationAddress, actual.DestinationAddress, "DestinationAddress mismatch") + require.Equal(t, 0, expected.Amount.Cmp(actual.Amount), "Amount mismatch: expected %s, got %s", + expected.Amount.String(), actual.Amount.String()) + require.Equal(t, expected.Metadata, actual.Metadata, "Metadata mismatch") +} diff --git a/bridgesync/downloader.go b/bridgesync/downloader.go index 89ed5a5f7..5af3cde1b 100644 --- a/bridgesync/downloader.go +++ b/bridgesync/downloader.go @@ -55,6 +55,7 @@ var ( "SetClaim(bytes32)", )) backwardLETEventSignature = crypto.Keccak256Hash([]byte("BackwardLET(uint256,bytes32,uint256,bytes32)")) + forwardLETEventSignature = crypto.Keccak256Hash([]byte("ForwardLET(uint256,bytes32,uint256,bytes32,bytes)")) claimAssetEtrogMethodID = common.Hex2Bytes("ccaa2d11") claimMessageEtrogMethodID = common.Hex2Bytes("f5efcd79") @@ -116,6 +117,7 @@ func buildAppender( appender[unsetClaimEventSignature] = buildUnsetClaimEventHandler(bridgeDeployment.agglayerBridgeL2) appender[setClaimEventSignature] = buildSetClaimEventHandler(bridgeDeployment.agglayerBridgeL2) appender[backwardLETEventSignature] = buildBackwardLETEventHandler(bridgeDeployment.agglayerBridgeL2) + 
appender[forwardLETEventSignature] = buildForwardLETEventHandler(bridgeDeployment.agglayerBridgeL2) return appender, nil } @@ -682,6 +684,28 @@ func buildBackwardLETEventHandler(contract *agglayerbridgel2.Agglayerbridgel2) f } } +// buildForwardLETEventHandler creates a handler for the ForwardLET event log +func buildForwardLETEventHandler(contract *agglayerbridgel2.Agglayerbridgel2) func(*sync.EVMBlock, types.Log) error { + return func(b *sync.EVMBlock, l types.Log) error { + event, err := contract.ParseForwardLET(l) + if err != nil { + return fmt.Errorf("error parsing ForwardLET event log %+v: %w", l, err) + } + + b.Events = append(b.Events, Event{ForwardLET: &ForwardLET{ + BlockNum: b.Num, + BlockPos: uint64(l.Index), + BlockTimestamp: b.Timestamp, + PreviousDepositCount: event.PreviousDepositCount, + PreviousRoot: event.PreviousRoot, + NewDepositCount: event.NewDepositCount, + NewRoot: event.NewRoot, + NewLeaves: event.NewLeaves, + }}) + return nil + } +} + type Call struct { From common.Address `json:"from"` To common.Address `json:"to"` diff --git a/bridgesync/processor.go b/bridgesync/processor.go index 376b209ed..4262d73d9 100644 --- a/bridgesync/processor.go +++ b/bridgesync/processor.go @@ -54,6 +54,9 @@ const ( // backwardLETTableName is the name of the table that stores backward local exit tree events backwardLETTableName = "backward_let" + // forwardLETTableName is the name of the table that stores forward local exit tree events + forwardLETTableName = "forward_let" + // nilStr holds nil string nilStr = "nil" ) @@ -503,6 +506,39 @@ func (b *BackwardLET) String() string { b.BlockNum, b.BlockPos, previousDepositCountStr, b.PreviousRoot.String(), newDepositCountStr, b.NewRoot.String()) } +// ForwardLET representation of a ForwardLET event, +// that is emitted by the L2 bridge contract when a LET is advanced. 
+type ForwardLET struct { + BlockNum uint64 `meddler:"block_num"` + BlockPos uint64 `meddler:"block_pos"` + BlockTimestamp uint64 `meddler:"block_timestamp"` + PreviousDepositCount *big.Int `meddler:"previous_deposit_count,bigint"` + PreviousRoot common.Hash `meddler:"previous_root,hash"` + NewDepositCount *big.Int `meddler:"new_deposit_count,bigint"` + NewRoot common.Hash `meddler:"new_root,hash"` + NewLeaves []byte `meddler:"new_leaves"` +} + +// String returns a formatted string representation of ForwardLET for debugging and logging. +func (f *ForwardLET) String() string { + prevDepositCountStr := nilStr + if f.PreviousDepositCount != nil { + prevDepositCountStr = f.PreviousDepositCount.String() + } + + newDepositCountStr := nilStr + if f.NewDepositCount != nil { + newDepositCountStr = f.NewDepositCount.String() + } + + return fmt.Sprintf("ForwardLET{BlockNum: %d, BlockPos: %d, "+ + "PreviousDepositCount: %s, PreviousRoot: %s, "+ + "NewDepositCount: %s, NewRoot: %s, NewLeaves: %x}", + f.BlockNum, f.BlockPos, + prevDepositCountStr, f.PreviousRoot.String(), + newDepositCountStr, f.NewRoot.String(), f.NewLeaves) +} + // Event combination of bridge, claim, token mapping and legacy token migration events type Event struct { Bridge *Bridge @@ -513,6 +549,7 @@ type Event struct { UnsetClaim *UnsetClaim SetClaim *SetClaim BackwardLET *BackwardLET + ForwardLET *ForwardLET } func (e Event) String() string { @@ -541,6 +578,9 @@ func (e Event) String() string { if e.BackwardLET != nil { parts = append(parts, e.BackwardLET.String()) } + if e.ForwardLET != nil { + parts = append(parts, e.ForwardLET.String()) + } return "Event{" + strings.Join(parts, ", ") + "}" } @@ -1559,6 +1599,13 @@ func (p *processor) ProcessBlock(ctx context.Context, block sync.Block) error { return err } } + + if event.ForwardLET != nil { + if err = p.handleForwardLETEvent(tx, event.ForwardLET); err != nil { + p.log.Errorf("failed to handle forward LET event at block %d: %v", block.Num, err) + return 
err + } + } } if err := tx.Commit(); err != nil { @@ -1646,6 +1693,46 @@ func (p *processor) archiveAndDeleteBridgesAbove(ctx context.Context, tx dbtypes return nil } +// handleForwardLETEvent processes a ForwardLET event and updates the database accordingly +func (p *processor) handleForwardLETEvent(tx dbtypes.Txer, event *ForwardLET) error { + decodedNewLeaves, err := decodeForwardLETLeaves(event.NewLeaves) + if err != nil { + return fmt.Errorf("failed to decode new leaves in forward LET: %w", err) + } + + newDepositCount := uint32(event.PreviousDepositCount.Uint64()) + 1 + + for _, leaf := range decodedNewLeaves { + bridge := leaf.ToBridge( + event.BlockNum, + event.BlockPos, + event.BlockTimestamp, + newDepositCount, + ) + + if _, err = p.exitTree.PutLeaf(tx, event.BlockNum, event.BlockPos, types.Leaf{ + Index: newDepositCount, + Hash: bridge.Hash(), + }); err != nil { + if errors.Is(err, tree.ErrInvalidIndex) { + p.halt(fmt.Sprintf("error adding leaf to the exit tree: %v", err)) + } + return sync.ErrInconsistentState + } + if err = meddler.Insert(tx, bridgeTableName, bridge); err != nil { + return fmt.Errorf("failed to insert bridge event from ForwardLET: %w", err) + } + + newDepositCount++ + } + + if err = meddler.Insert(tx, forwardLETTableName, event); err != nil { + return fmt.Errorf("failed to insert forward local exit tree event: %w", err) + } + + return nil +} + // GetTotalNumberOfRecords returns the total number of records in the given table func (p *processor) GetTotalNumberOfRecords(ctx context.Context, tableName, whereClause string) (int, error) { if !tableNameRegex.MatchString(tableName) { From 243cead7bd611b091effdfa2a61bb4a0204b1b83 Mon Sep 17 00:00:00 2001 From: Goran Rojovic Date: Mon, 15 Dec 2025 09:54:40 +0100 Subject: [PATCH 42/73] feat: increment blockPos based on forwardLET events --- bridgesync/abi.go | 28 +++++- bridgesync/backfill_tx_sender.go | 10 +- bridgesync/backfill_tx_sender_test.go | 106 +++++++++++++++++++ 
bridgesync/downloader.go | 1 + bridgesync/migrations/bridgesync0012.sql | 13 +++ bridgesync/processor.go | 123 ++++++++++++++++++++--- 6 files changed, 260 insertions(+), 21 deletions(-) diff --git a/bridgesync/abi.go b/bridgesync/abi.go index 3575050d2..a349bf7d7 100644 --- a/bridgesync/abi.go +++ b/bridgesync/abi.go @@ -20,13 +20,32 @@ type LeafData struct { Metadata []byte `abiarg:"metadata"` } -func (l LeafData) ToBridge(blockNum, blockPos, blockTimestamp uint64, depositCount uint32) Bridge { +func (l LeafData) String() string { + return fmt.Sprintf("LeafData{LeafType: %d, OriginNetwork: %d, OriginAddress: %s, "+ + "DestinationNetwork: %d, DestinationAddress: %s, Amount: %s, Metadata: %x}", + l.LeafType, + l.OriginNetwork, + l.OriginAddress.Hex(), + l.DestinationNetwork, + l.DestinationAddress.Hex(), + l.Amount.String(), + l.Metadata, + ) +} + +func (l LeafData) ToBridge( + blockNum, blockPos, blockTimestamp uint64, + depositCount uint32, + txnHash common.Hash, + txnSender, fromAddr common.Address) Bridge { return Bridge{ - BlockNum: blockNum, - // for all leaves from ForwardLET, BlockPos is the same as they are in the same transaction + BlockNum: blockNum, BlockPos: blockPos, BlockTimestamp: blockTimestamp, DepositCount: depositCount, + TxHash: txnHash, + FromAddress: fromAddr, + TxnSender: txnSender, LeafType: l.LeafType, OriginNetwork: l.OriginNetwork, OriginAddress: l.OriginAddress, @@ -34,8 +53,7 @@ func (l LeafData) ToBridge(blockNum, blockPos, blockTimestamp uint64, depositCou DestinationAddress: l.DestinationAddress, Amount: l.Amount, Metadata: l.Metadata, - // FromAddress is always zero address, because this is a recovery mechanism bridge - // TxnSender is always zero address, because this is a recovery mechanism bridge + Source: BridgeSourceForwardLET, // this leaf comes from ForwardLET event } } diff --git a/bridgesync/backfill_tx_sender.go b/bridgesync/backfill_tx_sender.go index f3ef9f85b..29798fa51 100644 --- a/bridgesync/backfill_tx_sender.go +++ 
b/bridgesync/backfill_tx_sender.go @@ -163,8 +163,9 @@ func (b *BackfillTxnSender) getRecordsNeedingBackfillCount(ctx context.Context, query := fmt.Sprintf(` SELECT COUNT(*) FROM %s - WHERE txn_sender = '' OR txn_sender IS NULL OR from_address = '' OR from_address IS NULL - `, tableName) + WHERE (txn_sender = '' OR txn_sender IS NULL OR from_address = '' OR from_address IS NULL) + AND (source IS NULL OR (source != '%s' AND source != '%s')) + `, tableName, BridgeSourceBackwardLET, BridgeSourceForwardLET) var count int dbCtx, cancel := context.WithTimeout(ctx, b.dbTimeout) @@ -188,9 +189,10 @@ func (b *BackfillTxnSender) getRecordsNeedingBackfill( query := fmt.Sprintf(` SELECT * FROM %s - WHERE txn_sender = '' OR txn_sender IS NULL OR from_address = '' OR from_address IS NULL + WHERE (txn_sender = '' OR txn_sender IS NULL OR from_address = '' OR from_address IS NULL) + AND (source IS NULL OR (source != '%s' AND source != '%s')) LIMIT $1 - `, tableName) + `, tableName, BridgeSourceBackwardLET, BridgeSourceForwardLET) dbCtx, cancel := context.WithTimeout(ctx, b.dbTimeout) defer cancel() diff --git a/bridgesync/backfill_tx_sender_test.go b/bridgesync/backfill_tx_sender_test.go index 685229a4b..216700f10 100644 --- a/bridgesync/backfill_tx_sender_test.go +++ b/bridgesync/backfill_tx_sender_test.go @@ -412,6 +412,112 @@ func TestBackfillTxnSender_getRecordsNeedingBackfillCount(t *testing.T) { require.Equal(t, 1, count) }) + t.Run("excludes backward_let and forward_let sources", func(t *testing.T) { + tempDir := t.TempDir() + dbPath := filepath.Join(tempDir, "test.db") + + // Run migrations + err := migrations.RunMigrations(dbPath) + require.NoError(t, err) + + // Create test data + database, err := db.NewSQLiteDB(dbPath) + require.NoError(t, err) + defer database.Close() + + ctx := context.Background() + tx, err := db.NewTx(ctx, database) + require.NoError(t, err) + + // Insert test data + _, err = tx.Exec(`INSERT INTO block (num) VALUES (1), (2), (3), (4)`) + 
require.NoError(t, err) + + // Insert bridge with empty txn_sender and NULL source (should be counted) + _, err = tx.Exec(` + INSERT INTO bridge ( + block_num, block_pos, leaf_type, origin_network, origin_address, + destination_network, destination_address, amount, metadata, deposit_count, + tx_hash, block_timestamp, from_address, txn_sender, source + ) VALUES ( + 1, 0, 1, 1, '0x1234567890123456789012345678901234567890', + 2, '0x0987654321098765432109876543210987654321', '1000000000000000000', + '', 1, '0xabcdef1234567890abcdef1234567890abcdef1234567890abcdef1234567890', + 1234567890, '0x1111111111111111111111111111111111111111', '', NULL + ) + `) + require.NoError(t, err) + + // Insert bridge with empty txn_sender and backward_let source (should NOT be counted) + _, err = tx.Exec(` + INSERT INTO bridge ( + block_num, block_pos, leaf_type, origin_network, origin_address, + destination_network, destination_address, amount, metadata, deposit_count, + tx_hash, block_timestamp, from_address, txn_sender, source + ) VALUES ( + 2, 0, 1, 1, '0x1234567890123456789012345678901234567890', + 2, '0x0987654321098765432109876543210987654321', '1000000000000000000', + '', 2, '0xabcdef1234567890abcdef1234567890abcdef1234567890abcdef1234567891', + 1234567890, '', '', 'backward_let' + ) + `) + require.NoError(t, err) + + // Insert bridge with empty txn_sender and forward_let source (should NOT be counted) + _, err = tx.Exec(` + INSERT INTO bridge ( + block_num, block_pos, leaf_type, origin_network, origin_address, + destination_network, destination_address, amount, metadata, deposit_count, + tx_hash, block_timestamp, from_address, txn_sender, source + ) VALUES ( + 3, 0, 1, 1, '0x1234567890123456789012345678901234567890', + 2, '0x0987654321098765432109876543210987654321', '1000000000000000000', + '', 3, '0xabcdef1234567890abcdef1234567890abcdef1234567890abcdef1234567892', + 1234567890, '', '', 'forward_let' + ) + `) + require.NoError(t, err) + + // Insert bridge with empty txn_sender 
and no source field (should be counted) + _, err = tx.Exec(` + INSERT INTO bridge ( + block_num, block_pos, leaf_type, origin_network, origin_address, + destination_network, destination_address, amount, metadata, deposit_count, + tx_hash, block_timestamp, from_address, txn_sender + ) VALUES ( + 4, 0, 1, 1, '0x1234567890123456789012345678901234567890', + 2, '0x0987654321098765432109876543210987654321', '1000000000000000000', + '', 4, '0xabcdef1234567890abcdef1234567890abcdef1234567890abcdef1234567893', + 1234567890, '', '' + ) + `) + require.NoError(t, err) + + err = tx.Commit() + require.NoError(t, err) + + mockClient := mocks.NewEthClienter(t) + logger := log.WithFields("module", "test") + backfiller, err := NewBackfillTxnSender(dbPath, mockClient, common.HexToAddress("0x1234"), logger) + require.NoError(t, err) + defer backfiller.Close() + + // Should only count the 2 records without backward_let or forward_let source + count, err := backfiller.getRecordsNeedingBackfillCount(ctx, "bridge") + require.NoError(t, err) + require.Equal(t, 2, count) + + // Verify getRecordsNeedingBackfill also excludes these sources + records, err := backfiller.getRecordsNeedingBackfill(ctx, "bridge", 10) + require.NoError(t, err) + require.Len(t, records, 2) + + // Verify the correct records were returned (block_num 1 and 4) + blockNums := []uint64{records[0].BlockNum, records[1].BlockNum} + require.Contains(t, blockNums, uint64(1)) + require.Contains(t, blockNums, uint64(4)) + }) + t.Run("database error", func(t *testing.T) { tempDir := t.TempDir() dbPath := filepath.Join(tempDir, "test.db") diff --git a/bridgesync/downloader.go b/bridgesync/downloader.go index 5af3cde1b..49f78a4c8 100644 --- a/bridgesync/downloader.go +++ b/bridgesync/downloader.go @@ -696,6 +696,7 @@ func buildForwardLETEventHandler(contract *agglayerbridgel2.Agglayerbridgel2) fu BlockNum: b.Num, BlockPos: uint64(l.Index), BlockTimestamp: b.Timestamp, + TxnHash: l.TxHash, PreviousDepositCount: 
event.PreviousDepositCount, PreviousRoot: event.PreviousRoot, NewDepositCount: event.NewDepositCount, diff --git a/bridgesync/migrations/bridgesync0012.sql b/bridgesync/migrations/bridgesync0012.sql index 3ffc19e0b..516bee88c 100644 --- a/bridgesync/migrations/bridgesync0012.sql +++ b/bridgesync/migrations/bridgesync0012.sql @@ -6,6 +6,7 @@ ALTER TABLE bridge DROP COLUMN to_address; DROP TABLE IF EXISTS bridge_archive; DROP TABLE IF EXISTS backward_let; +DROP TABLE IF EXISTS forward_let; ALTER TABLE bridge DROP COLUMN source; -- +migrate Up @@ -26,6 +27,18 @@ CREATE TABLE IF NOT EXISTS backward_let ( ALTER TABLE bridge ADD COLUMN source TEXT DEFAULT ''; +CREATE TABLE IF NOT EXISTS forward_let ( + block_num INTEGER NOT NULL REFERENCES block (num) ON DELETE CASCADE, + block_pos INTEGER NOT NULL, + block_timestamp INTEGER NOT NULL, + tx_hash VARCHAR NOT NULL, + previous_deposit_count TEXT NOT NULL, + previous_root VARCHAR NOT NULL, + new_deposit_count TEXT NOT NULL, + new_root VARCHAR NOT NULL, + new_leaves BLOB NOT NULL, + PRIMARY KEY (block_num, block_pos) + ); ------------------------------------------------------------------------------ -- Create bridge_archive table ------------------------------------------------------------------------------ diff --git a/bridgesync/processor.go b/bridgesync/processor.go index 4262d73d9..b0000d378 100644 --- a/bridgesync/processor.go +++ b/bridgesync/processor.go @@ -143,9 +143,9 @@ type Bridge struct { BlockTimestamp uint64 `meddler:"block_timestamp"` LeafType uint8 `meddler:"leaf_type"` OriginNetwork uint32 `meddler:"origin_network"` - OriginAddress common.Address `meddler:"origin_address"` + OriginAddress common.Address `meddler:"origin_address,address"` DestinationNetwork uint32 `meddler:"destination_network"` - DestinationAddress common.Address `meddler:"destination_address"` + DestinationAddress common.Address `meddler:"destination_address,address"` Amount *big.Int `meddler:"amount,bigint"` Metadata []byte 
`meddler:"metadata"` DepositCount uint32 `meddler:"deposit_count"` @@ -512,6 +512,7 @@ type ForwardLET struct { BlockNum uint64 `meddler:"block_num"` BlockPos uint64 `meddler:"block_pos"` BlockTimestamp uint64 `meddler:"block_timestamp"` + TxnHash common.Hash `meddler:"tx_hash,hash"` PreviousDepositCount *big.Int `meddler:"previous_deposit_count,bigint"` PreviousRoot common.Hash `meddler:"previous_root,hash"` NewDepositCount *big.Int `meddler:"new_deposit_count,bigint"` @@ -532,9 +533,11 @@ func (f *ForwardLET) String() string { } return fmt.Sprintf("ForwardLET{BlockNum: %d, BlockPos: %d, "+ + "BlockTimestamp: %d, TxnHash: %s, "+ "PreviousDepositCount: %s, PreviousRoot: %s, "+ "NewDepositCount: %s, NewRoot: %s, NewLeaves: %x}", f.BlockNum, f.BlockPos, + f.BlockTimestamp, f.TxnHash.String(), prevDepositCountStr, f.PreviousRoot.String(), newDepositCountStr, f.NewRoot.String(), f.NewLeaves) } @@ -1506,6 +1509,7 @@ func (p *processor) ProcessBlock(ctx context.Context, block sync.Block) error { return err } + var blockPos *uint64 for _, e := range block.Events { event, ok := e.(Event) if !ok { @@ -1514,6 +1518,13 @@ func (p *processor) ProcessBlock(ctx context.Context, block sync.Block) error { } if event.Bridge != nil { + if blockPos != nil { + // increment block position based on forward LET events processed so far + // in the current block + event.Bridge.BlockPos = *blockPos + *blockPos++ + } + if _, err = p.exitTree.PutLeaf(tx, block.Num, event.Bridge.BlockPos, types.Leaf{ Index: event.Bridge.DepositCount, Hash: event.Bridge.Hash(), @@ -1601,10 +1612,13 @@ func (p *processor) ProcessBlock(ctx context.Context, block sync.Block) error { } if event.ForwardLET != nil { - if err = p.handleForwardLETEvent(tx, event.ForwardLET); err != nil { + newBlockPos, err := p.handleForwardLETEvent(tx, event.ForwardLET, blockPos) + if err != nil { p.log.Errorf("failed to handle forward LET event at block %d: %v", block.Num, err) return err } + + blockPos = &newBlockPos } } @@ -1693,44 
+1707,129 @@ func (p *processor) archiveAndDeleteBridgesAbove(ctx context.Context, tx dbtypes return nil } +// sanityCheckLatestLER checks if the provided local exit root matches the latest one in the exit tree +func (p *processor) sanityCheckLatestLER(tx dbtypes.Txer, ler common.Hash) error { + root, err := p.exitTree.GetLastRoot(tx) + if err != nil { + return fmt.Errorf("failed to get last root from exit tree: %w", err) + } + if root.Hash != ler { + return fmt.Errorf("local exit root mismatch: expected %s, got %s", + ler.String(), root.Hash.String()) + } + return nil +} + // handleForwardLETEvent processes a ForwardLET event and updates the database accordingly -func (p *processor) handleForwardLETEvent(tx dbtypes.Txer, event *ForwardLET) error { +func (p *processor) handleForwardLETEvent(tx dbtypes.Txer, event *ForwardLET, blockPos *uint64) (uint64, error) { + // first we sanity check that the previous root matches the latest one in the exit tree + if err := p.sanityCheckLatestLER(tx, event.PreviousRoot); err != nil { + return 0, fmt.Errorf("failed to sanity check LER before processing ForwardLET: %w", err) + } + + // first we decode the new LET leaves from the forward LET event + // they are basically bridge events, but without some fields set (tx hash, sender, from address) decodedNewLeaves, err := decodeForwardLETLeaves(event.NewLeaves) if err != nil { - return fmt.Errorf("failed to decode new leaves in forward LET: %w", err) + return 0, fmt.Errorf("failed to decode new leaves in forward LET: %w", err) } newDepositCount := uint32(event.PreviousDepositCount.Uint64()) + 1 + newBlockPos := event.BlockPos + if blockPos != nil { + newBlockPos = *blockPos + } + + const getArchivedBridgesSQL = ` + SELECT * FROM bridge_archive + WHERE leaf_type = $1 + AND origin_network = $2 + AND origin_address = $3 + AND destination_network = $4 + AND destination_address = $5 + AND amount = $6 + AND metadata = $7 + ` + // now we process each new leaf to insert them into the exit 
tree and bridges table for _, leaf := range decodedNewLeaves { + var archivedBridges []*Bridge + err = meddler.QueryAll(tx, &archivedBridges, getArchivedBridgesSQL, + leaf.LeafType, + leaf.OriginNetwork, + leaf.OriginAddress.Hex(), + leaf.DestinationNetwork, + leaf.DestinationAddress.Hex(), + leaf.Amount.String(), + leaf.Metadata, + ) + if err != nil { + return 0, fmt.Errorf("failed to query archived bridges: %w", err) + } + + var ( + txnHash = event.TxnHash + txnSender, fromAddr common.Address + ) + + // let's see if we have exactly one archived bridge that matches the forward LET leaf + // usually we should have exactly one match since to recover the LET on L2, + // we must have a backwards LET done which archives the bridges, + // and then a forward LET that re-adds them to the exit tree after fixing it + // however, in case of multiple matches, we cannot be sure which one to use, + // so we will just log and leave the txnSender and fromAddr fields empty + if len(archivedBridges) == 1 { + archivedBridge := archivedBridges[0] + txnHash = archivedBridge.TxHash + txnSender = archivedBridge.TxnSender + fromAddr = archivedBridge.FromAddress + } else if len(archivedBridges) > 1 { + p.log.Debugf("multiple archived bridges found that match forward LET leaf %s;"+ + "cannot set txnSender and fromAddr fields to the bridge", leaf.String()) + } + + // create the new bridge event from the forward LET leaf bridge := leaf.ToBridge( event.BlockNum, - event.BlockPos, + newBlockPos, event.BlockTimestamp, newDepositCount, + txnHash, + txnSender, + fromAddr, ) - if _, err = p.exitTree.PutLeaf(tx, event.BlockNum, event.BlockPos, types.Leaf{ + // insert the new bridge leaf into the local exit tree + if _, err = p.exitTree.PutLeaf(tx, event.BlockNum, newBlockPos, types.Leaf{ Index: newDepositCount, Hash: bridge.Hash(), }); err != nil { if errors.Is(err, tree.ErrInvalidIndex) { p.halt(fmt.Sprintf("error adding leaf to the exit tree: %v", err)) } - return sync.ErrInconsistentState + 
return 0, sync.ErrInconsistentState } - if err = meddler.Insert(tx, bridgeTableName, bridge); err != nil { - return fmt.Errorf("failed to insert bridge event from ForwardLET: %w", err) + + // insert the new bridge into the bridges table + if err = meddler.Insert(tx, bridgeTableName, &bridge); err != nil { + return 0, fmt.Errorf("failed to insert bridge event from ForwardLET: %w", err) } newDepositCount++ + newBlockPos++ } + // after processing all new leaves, we sanity check that the new root matches the latest one in the exit tree + if err := p.sanityCheckLatestLER(tx, event.NewRoot); err != nil { + return 0, fmt.Errorf("failed to sanity check LER before processing ForwardLET: %w", err) + } + + // finally, insert the forward LET event into the designated table if err = meddler.Insert(tx, forwardLETTableName, event); err != nil { - return fmt.Errorf("failed to insert forward local exit tree event: %w", err) + return 0, fmt.Errorf("failed to insert forward local exit tree event: %w", err) } - return nil + return newBlockPos, nil } // GetTotalNumberOfRecords returns the total number of records in the given table From 75f7c5de8e22ea98ae689a2bb21e0f1110d80e8b Mon Sep 17 00:00:00 2001 From: Goran Rojovic Date: Mon, 15 Dec 2025 17:02:02 +0100 Subject: [PATCH 43/73] feat: UT --- bridgesync/processor.go | 22 +- bridgesync/processor_forward_let_test.go | 754 +++++++++++++++++++++++ 2 files changed, 771 insertions(+), 5 deletions(-) create mode 100644 bridgesync/processor_forward_let_test.go diff --git a/bridgesync/processor.go b/bridgesync/processor.go index b0000d378..b94803540 100644 --- a/bridgesync/processor.go +++ b/bridgesync/processor.go @@ -143,9 +143,9 @@ type Bridge struct { BlockTimestamp uint64 `meddler:"block_timestamp"` LeafType uint8 `meddler:"leaf_type"` OriginNetwork uint32 `meddler:"origin_network"` - OriginAddress common.Address `meddler:"origin_address,address"` + OriginAddress common.Address `meddler:"origin_address"` DestinationNetwork uint32 
`meddler:"destination_network"` - DestinationAddress common.Address `meddler:"destination_address,address"` + DestinationAddress common.Address `meddler:"destination_address"` Amount *big.Int `meddler:"amount,bigint"` Metadata []byte `meddler:"metadata"` DepositCount uint32 `meddler:"deposit_count"` @@ -1584,6 +1584,12 @@ func (p *processor) ProcessBlock(ctx context.Context, block sync.Block) error { } if event.BackwardLET != nil { + // we sanity check that the previous root matches the latest one in the exit tree + if err := p.sanityCheckLatestLER(tx, event.BackwardLET.PreviousRoot); err != nil { + p.log.Errorf("failed to sanity check LER before processing BackwardLET: %v", err) + return err + } + newDepositCount, leafIndex, err := normalizeDepositCount(event.BackwardLET.NewDepositCount) if err != nil { return err @@ -1604,7 +1610,13 @@ func (p *processor) ProcessBlock(ctx context.Context, block sync.Block) error { return err } - // 3. insert the backward let event to designated table + // 4. sanity check that the new root matches the latest one in the exit tree + if err := p.sanityCheckLatestLER(tx, event.BackwardLET.NewRoot); err != nil { + p.log.Errorf("failed to sanity check LER after processing BackwardLET: %v", err) + return err + } + + // 5. 
insert the backward let event to designated table if err = meddler.Insert(tx, backwardLETTableName, event.BackwardLET); err != nil { p.log.Errorf("failed to insert backward local exit tree event at block %d: %v", block.Num, err) return err @@ -1757,9 +1769,9 @@ func (p *processor) handleForwardLETEvent(tx dbtypes.Txer, event *ForwardLET, bl err = meddler.QueryAll(tx, &archivedBridges, getArchivedBridgesSQL, leaf.LeafType, leaf.OriginNetwork, - leaf.OriginAddress.Hex(), + leaf.OriginAddress, leaf.DestinationNetwork, - leaf.DestinationAddress.Hex(), + leaf.DestinationAddress, leaf.Amount.String(), leaf.Metadata, ) diff --git a/bridgesync/processor_forward_let_test.go b/bridgesync/processor_forward_let_test.go new file mode 100644 index 000000000..db04286a5 --- /dev/null +++ b/bridgesync/processor_forward_let_test.go @@ -0,0 +1,754 @@ +package bridgesync + +import ( + "fmt" + "math/big" + "path/filepath" + "testing" + + aggkitabi "github.com/agglayer/aggkit/abi" + "github.com/agglayer/aggkit/bridgesync/migrations" + aggkitcommon "github.com/agglayer/aggkit/common" + "github.com/agglayer/aggkit/db" + dbtypes "github.com/agglayer/aggkit/db/types" + "github.com/agglayer/aggkit/log" + "github.com/agglayer/aggkit/tree/types" + "github.com/ethereum/go-ethereum/common" + "github.com/russross/meddler" + "github.com/stretchr/testify/require" +) + +func TestHandleForwardLETEvent(t *testing.T) { + t.Run("successfully process single leaf with no archived bridge", func(t *testing.T) { + p, tx := setupProcessorWithTransaction(t) + defer tx.Rollback() //nolint:errcheck + + // Setup: Insert initial leaves to establish previous root (indices 0-4) + var initialRoot common.Hash + var err error + // Insert block rows for initial leaves + for i := uint32(0); i <= 4; i++ { + _, err = tx.Exec(`INSERT INTO block (num) VALUES ($1)`, 10+uint64(i)) + require.NoError(t, err) + } + for i := uint32(0); i <= 4; i++ { + leaf := types.Leaf{Index: i, Hash: common.HexToHash(fmt.Sprintf("0x%d", i))} + 
initialRoot, err = p.exitTree.PutLeaf(tx, 10+uint64(i), 0, leaf) + require.NoError(t, err) + } + initialDepositCount := uint32(4) // Last index inserted + + // Insert block for the ForwardLET event + _, err = tx.Exec(`INSERT INTO block (num) VALUES ($1)`, uint64(100)) + require.NoError(t, err) + + // Create forward LET event with one leaf + leaves := []LeafData{ + { + LeafType: 1, + OriginNetwork: 5, + OriginAddress: common.HexToAddress("0x1234567890123456789012345678901234567890"), + DestinationNetwork: 10, + DestinationAddress: common.HexToAddress("0xabcdefabcdefabcdefabcdefabcdefabcdefabcd"), + Amount: big.NewInt(1000000), + Metadata: []byte("test metadata"), + }, + } + encodedLeaves := encodeLeafDataArrayForTest(t, leaves) + + event := &ForwardLET{ + BlockNum: 100, + BlockPos: 5, + BlockTimestamp: 1234567890, + TxnHash: common.HexToHash("0xabc123"), + PreviousDepositCount: big.NewInt(int64(initialDepositCount)), + PreviousRoot: initialRoot, + NewDepositCount: big.NewInt(int64(initialDepositCount + 1)), + NewLeaves: encodedLeaves, + } + + // Calculate the expected root that will result from processing these leaves + event.NewRoot = calculateExpectedRootAfterForwardLET(t, initialDepositCount, leaves, event) + + // Test: Process the forward LET event + blockPos := event.BlockPos + newBlockPos, err := p.handleForwardLETEvent(tx, event, &blockPos) + require.NoError(t, err) + require.Equal(t, event.BlockPos+1, newBlockPos) + + // Verify: Bridge was inserted + var bridges []*Bridge + err = meddler.QueryAll(tx, &bridges, "SELECT * FROM bridge WHERE block_num = $1", event.BlockNum) + require.NoError(t, err) + require.Len(t, bridges, 1) + + bridge := bridges[0] + require.Equal(t, event.BlockNum, bridge.BlockNum) + require.Equal(t, event.BlockPos, bridge.BlockPos) + require.Equal(t, leaves[0].LeafType, bridge.LeafType) + require.Equal(t, leaves[0].OriginNetwork, bridge.OriginNetwork) + require.Equal(t, leaves[0].OriginAddress, bridge.OriginAddress) + require.Equal(t, 
leaves[0].DestinationNetwork, bridge.DestinationNetwork) + require.Equal(t, leaves[0].DestinationAddress, bridge.DestinationAddress) + require.Equal(t, 0, leaves[0].Amount.Cmp(bridge.Amount)) + require.Equal(t, leaves[0].Metadata, bridge.Metadata) + require.Equal(t, initialDepositCount+1, bridge.DepositCount) + require.Equal(t, event.TxnHash, bridge.TxHash) + require.Equal(t, aggkitcommon.ZeroAddress, bridge.TxnSender) + require.Equal(t, aggkitcommon.ZeroAddress, bridge.FromAddress) + require.Equal(t, BridgeSourceForwardLET, bridge.Source) + + // Verify: ForwardLET event was inserted + var forwardLETs []*ForwardLET + err = meddler.QueryAll(tx, &forwardLETs, "SELECT * FROM forward_let WHERE block_num = $1", event.BlockNum) + require.NoError(t, err) + require.Len(t, forwardLETs, 1) + require.Equal(t, event.BlockNum, forwardLETs[0].BlockNum) + }) + + t.Run("successfully process multiple leaves", func(t *testing.T) { + p, tx := setupProcessorWithTransaction(t) + defer tx.Rollback() //nolint:errcheck + + // Setup: Insert initial leaves (indices 0-9) + var initialRoot common.Hash + var err error + // Insert block rows for initial leaves + for i := uint32(0); i <= 9; i++ { + _, err = tx.Exec(`INSERT INTO block (num) VALUES ($1)`, 20+uint64(i)) + require.NoError(t, err) + } + for i := uint32(0); i <= 9; i++ { + leaf := types.Leaf{Index: i, Hash: common.HexToHash(fmt.Sprintf("0x%d", i))} + initialRoot, err = p.exitTree.PutLeaf(tx, 20+uint64(i), 0, leaf) + require.NoError(t, err) + } + initialDepositCount := uint32(9) // Last index inserted + + // Insert block for the ForwardLET event + _, err = tx.Exec(`INSERT INTO block (num) VALUES ($1)`, uint64(200)) + require.NoError(t, err) + + // Create forward LET event with three leaves + leaves := []LeafData{ + { + LeafType: 0, + OriginNetwork: 1, + OriginAddress: common.HexToAddress("0x1111111111111111111111111111111111111111"), + DestinationNetwork: 2, + DestinationAddress: 
common.HexToAddress("0x2222222222222222222222222222222222222222"), + Amount: big.NewInt(100), + Metadata: []byte("first"), + }, + { + LeafType: 1, + OriginNetwork: 3, + OriginAddress: common.HexToAddress("0x3333333333333333333333333333333333333333"), + DestinationNetwork: 4, + DestinationAddress: common.HexToAddress("0x4444444444444444444444444444444444444444"), + Amount: big.NewInt(200), + Metadata: []byte("second"), + }, + { + LeafType: 2, + OriginNetwork: 5, + OriginAddress: common.HexToAddress("0x5555555555555555555555555555555555555555"), + DestinationNetwork: 6, + DestinationAddress: common.HexToAddress("0x6666666666666666666666666666666666666666"), + Amount: big.NewInt(300), + Metadata: []byte("third"), + }, + } + encodedLeaves := encodeLeafDataArrayForTest(t, leaves) + + event := &ForwardLET{ + BlockNum: 200, + BlockPos: 10, + BlockTimestamp: 1234567900, + TxnHash: common.HexToHash("0xdef456"), + PreviousDepositCount: big.NewInt(int64(initialDepositCount)), + PreviousRoot: initialRoot, + NewDepositCount: big.NewInt(int64(initialDepositCount + uint32(len(leaves)))), + NewLeaves: encodedLeaves, + } + + // Calculate the expected root that will result from processing these leaves + event.NewRoot = calculateExpectedRootAfterForwardLET(t, initialDepositCount, leaves, event) + + // Test: Process the forward LET event + blockPos := event.BlockPos + newBlockPos, err := p.handleForwardLETEvent(tx, event, &blockPos) + require.NoError(t, err) + require.Equal(t, event.BlockPos+uint64(len(leaves)), newBlockPos) + + // Verify: All bridges were inserted + var bridges []*Bridge + err = meddler.QueryAll(tx, &bridges, "SELECT * FROM bridge WHERE block_num = $1 ORDER BY block_pos", event.BlockNum) + require.NoError(t, err) + require.Len(t, bridges, 3) + + // Verify each bridge + for i, bridge := range bridges { + require.Equal(t, event.BlockNum, bridge.BlockNum) + require.Equal(t, event.BlockPos+uint64(i), bridge.BlockPos) + require.Equal(t, leaves[i].LeafType, 
bridge.LeafType) + require.Equal(t, leaves[i].OriginNetwork, bridge.OriginNetwork) + require.Equal(t, initialDepositCount+uint32(i)+1, bridge.DepositCount) + require.Equal(t, BridgeSourceForwardLET, bridge.Source) + } + }) + + t.Run("process leaf with matching archived bridge", func(t *testing.T) { + p, tx := setupProcessorWithTransaction(t) + defer tx.Rollback() //nolint:errcheck + + // Setup: Insert initial leaves (indices 0-14) + var initialRoot common.Hash + var err error + // Insert block rows for initial leaves + for i := uint32(0); i <= 14; i++ { + _, err = tx.Exec(`INSERT INTO block (num) VALUES ($1)`, 30+uint64(i)) + require.NoError(t, err) + } + for i := uint32(0); i <= 14; i++ { + leaf := types.Leaf{Index: i, Hash: common.HexToHash(fmt.Sprintf("0x%d", i))} + initialRoot, err = p.exitTree.PutLeaf(tx, 30+uint64(i), 0, leaf) + require.NoError(t, err) + } + initialDepositCount := uint32(14) // Last index inserted + + // Insert blocks for the archived bridge and ForwardLET event + _, err = tx.Exec(`INSERT INTO block (num) VALUES ($1), ($2)`, uint64(50), uint64(300)) + require.NoError(t, err) + + // Setup: Create and archive a bridge that will match the forward LET leaf + archivedTxHash := common.HexToHash("0xoriginal123") + archivedTxnSender := common.HexToAddress("0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa") + archivedFromAddr := common.HexToAddress("0xbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb") + + archivedBridge := &Bridge{ + BlockNum: 50, + BlockPos: 0, + LeafType: 1, + OriginNetwork: 7, + OriginAddress: common.HexToAddress("0x7777777777777777777777777777777777777777"), + DestinationNetwork: 8, + DestinationAddress: common.HexToAddress("0x8888888888888888888888888888888888888888"), + Amount: big.NewInt(500000), + Metadata: []byte("archived metadata"), + DepositCount: 20, + TxHash: archivedTxHash, + TxnSender: archivedTxnSender, + FromAddress: archivedFromAddr, + // Don't set Source - bridge_archive table doesn't have this column + } + // Insert manually to 
avoid Source field + _, err = tx.Exec(` + INSERT INTO bridge_archive ( + block_num, block_pos, leaf_type, origin_network, origin_address, + destination_network, destination_address, amount, metadata, deposit_count, + tx_hash, block_timestamp, from_address, txn_sender + ) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, 0, $12, $13) + `, archivedBridge.BlockNum, archivedBridge.BlockPos, archivedBridge.LeafType, + archivedBridge.OriginNetwork, archivedBridge.OriginAddress, + archivedBridge.DestinationNetwork, archivedBridge.DestinationAddress, + archivedBridge.Amount.String(), archivedBridge.Metadata, archivedBridge.DepositCount, + archivedBridge.TxHash.Hex(), archivedBridge.FromAddress.Hex(), archivedBridge.TxnSender.Hex()) + require.NoError(t, err) + + // Create forward LET event with matching leaf + leaves := []LeafData{ + { + LeafType: archivedBridge.LeafType, + OriginNetwork: archivedBridge.OriginNetwork, + OriginAddress: archivedBridge.OriginAddress, + DestinationNetwork: archivedBridge.DestinationNetwork, + DestinationAddress: archivedBridge.DestinationAddress, + Amount: archivedBridge.Amount, + Metadata: archivedBridge.Metadata, + }, + } + encodedLeaves := encodeLeafDataArrayForTest(t, leaves) + + event := &ForwardLET{ + BlockNum: 300, + BlockPos: 20, + BlockTimestamp: 1234567950, + TxnHash: common.HexToHash("0xforward789"), + PreviousDepositCount: big.NewInt(int64(initialDepositCount)), + PreviousRoot: initialRoot, + NewDepositCount: big.NewInt(int64(initialDepositCount + 1)), + NewLeaves: encodedLeaves, + } + + // Calculate expected new root using helper (which will query for archived bridge) + event.NewRoot = calculateExpectedRootAfterForwardLET(t, initialDepositCount, leaves, event, archivedBridge) + + // Test: Process the forward LET event + blockPos := event.BlockPos + newBlockPos, err := p.handleForwardLETEvent(tx, event, &blockPos) + require.NoError(t, err) + require.Equal(t, event.BlockPos+1, newBlockPos) + + // Verify: Bridge was inserted with 
archived tx info + var bridges []*Bridge + err = meddler.QueryAll(tx, &bridges, "SELECT * FROM bridge WHERE block_num = $1", event.BlockNum) + require.NoError(t, err) + require.Len(t, bridges, 1) + + bridge := bridges[0] + require.Equal(t, archivedTxHash, bridge.TxHash, "Should use archived tx hash") + require.Equal(t, archivedTxnSender, bridge.TxnSender, "Should use archived txn sender") + require.Equal(t, archivedFromAddr, bridge.FromAddress, "Should use archived from address") + require.Equal(t, BridgeSourceForwardLET, bridge.Source) + }) + + t.Run("process leaf with multiple matching archived bridges", func(t *testing.T) { + p, tx := setupProcessorWithTransaction(t) + defer tx.Rollback() //nolint:errcheck + + // Setup: Insert initial leaves (indices 0-24) + var initialRoot common.Hash + var err error + // Insert block rows for initial leaves + for i := uint32(0); i <= 24; i++ { + _, err = tx.Exec(`INSERT INTO block (num) VALUES ($1)`, 40+uint64(i)) + require.NoError(t, err) + } + for i := uint32(0); i <= 24; i++ { + leaf := types.Leaf{Index: i, Hash: common.HexToHash(fmt.Sprintf("0x%d", i))} + initialRoot, err = p.exitTree.PutLeaf(tx, 40+uint64(i), 0, leaf) + require.NoError(t, err) + } + initialDepositCount := uint32(24) // Last index inserted + + // Insert blocks for archived bridges (60, 61 already exist from initial leaves) and ForwardLET event + _, err = tx.Exec(`INSERT INTO block (num) VALUES ($1)`, uint64(400)) + require.NoError(t, err) + + // Setup: Create two archived bridges with identical LeafData fields + commonLeafData := LeafData{ + LeafType: 1, + OriginNetwork: 9, + OriginAddress: common.HexToAddress("0x9999999999999999999999999999999999999999"), + DestinationNetwork: 11, + DestinationAddress: common.HexToAddress("0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"), + Amount: big.NewInt(750000), + Metadata: []byte("duplicate metadata"), + } + + archivedBridge1 := &Bridge{ + BlockNum: 60, + BlockPos: 0, + LeafType: commonLeafData.LeafType, + 
OriginNetwork: commonLeafData.OriginNetwork, + OriginAddress: commonLeafData.OriginAddress, + DestinationNetwork: commonLeafData.DestinationNetwork, + DestinationAddress: commonLeafData.DestinationAddress, + Amount: commonLeafData.Amount, + Metadata: commonLeafData.Metadata, + DepositCount: 30, + TxHash: common.HexToHash("0xfirst111"), + TxnSender: common.HexToAddress("0x1111111111111111111111111111111111111111"), + FromAddress: common.HexToAddress("0x2222222222222222222222222222222222222222"), + } + + archivedBridge2 := &Bridge{ + BlockNum: 61, + BlockPos: 0, + LeafType: commonLeafData.LeafType, + OriginNetwork: commonLeafData.OriginNetwork, + OriginAddress: commonLeafData.OriginAddress, + DestinationNetwork: commonLeafData.DestinationNetwork, + DestinationAddress: commonLeafData.DestinationAddress, + Amount: commonLeafData.Amount, + Metadata: commonLeafData.Metadata, + DepositCount: 31, + TxHash: common.HexToHash("0xsecond222"), + TxnSender: common.HexToAddress("0x3333333333333333333333333333333333333333"), + FromAddress: common.HexToAddress("0x4444444444444444444444444444444444444444"), + } + + // Insert both archived bridges manually (to avoid Source column) + for _, archived := range []*Bridge{archivedBridge1, archivedBridge2} { + _, err = tx.Exec(` + INSERT INTO bridge_archive ( + block_num, block_pos, leaf_type, origin_network, origin_address, + destination_network, destination_address, amount, metadata, deposit_count, + tx_hash, block_timestamp, from_address, txn_sender + ) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, 0, $12, $13) + `, archived.BlockNum, archived.BlockPos, archived.LeafType, + archived.OriginNetwork, archived.OriginAddress, + archived.DestinationNetwork, archived.DestinationAddress, + archived.Amount.String(), archived.Metadata, archived.DepositCount, + archived.TxHash.Hex(), archived.FromAddress.Hex(), archived.TxnSender.Hex()) + require.NoError(t, err) + } + + // Create forward LET event with the common leaf + leaves := 
[]LeafData{commonLeafData} + encodedLeaves := encodeLeafDataArrayForTest(t, leaves) + + event := &ForwardLET{ + BlockNum: 400, + BlockPos: 30, + BlockTimestamp: 1234567999, + TxnHash: common.HexToHash("0xforward999"), + PreviousDepositCount: big.NewInt(int64(initialDepositCount)), + PreviousRoot: initialRoot, + NewDepositCount: big.NewInt(int64(initialDepositCount + 1)), + NewLeaves: encodedLeaves, + } + + // Calculate expected new root using helper (with no archived bridge info since multiple matches) + event.NewRoot = calculateExpectedRootAfterForwardLET(t, initialDepositCount, leaves, event) + + // Test: Process the forward LET event + blockPos := event.BlockPos + newBlockPos, err := p.handleForwardLETEvent(tx, event, &blockPos) + require.NoError(t, err) + require.Equal(t, event.BlockPos+1, newBlockPos) + + // Verify: Bridge was inserted with event's tx hash and empty addresses + var bridges []*Bridge + err = meddler.QueryAll(tx, &bridges, "SELECT * FROM bridge WHERE block_num = $1", event.BlockNum) + require.NoError(t, err) + require.Len(t, bridges, 1) + + bridge := bridges[0] + require.Equal(t, event.TxnHash, bridge.TxHash, "Should use event's tx hash when multiple archived bridges match") + require.Equal(t, common.Address{}, bridge.TxnSender, "TxnSender should be empty with multiple matches") + require.Equal(t, common.Address{}, bridge.FromAddress, "FromAddress should be empty with multiple matches") + require.Equal(t, BridgeSourceForwardLET, bridge.Source) + }) + + t.Run("error on previous root mismatch", func(t *testing.T) { + p, tx := setupProcessorWithTransaction(t) + defer tx.Rollback() //nolint:errcheck + + // Setup: Insert initial leaves (indices 0-4) + var initialRoot common.Hash + var err error + for i := uint32(0); i <= 4; i++ { + leaf := types.Leaf{Index: i, Hash: common.HexToHash(fmt.Sprintf("0x%d", i))} + initialRoot, err = p.exitTree.PutLeaf(tx, 10+uint64(i), 0, leaf) + require.NoError(t, err) + } + initialDepositCount := uint32(4) // Last index 
inserted + + // Create forward LET event with WRONG previous root + leaves := []LeafData{ + { + LeafType: 1, + OriginNetwork: 5, + OriginAddress: common.HexToAddress("0x1234567890123456789012345678901234567890"), + DestinationNetwork: 10, + DestinationAddress: common.HexToAddress("0xabcdefabcdefabcdefabcdefabcdefabcdefabcd"), + Amount: big.NewInt(1000000), + Metadata: []byte("test"), + }, + } + encodedLeaves := encodeLeafDataArrayForTest(t, leaves) + + event := &ForwardLET{ + BlockNum: 100, + BlockPos: 5, + BlockTimestamp: 1234567890, + TxnHash: common.HexToHash("0xabc123"), + PreviousDepositCount: big.NewInt(int64(initialDepositCount)), + PreviousRoot: common.HexToHash("0xWRONG"), // Wrong root + NewDepositCount: big.NewInt(int64(initialDepositCount + 1)), + NewRoot: common.HexToHash("0x999"), + NewLeaves: encodedLeaves, + } + + // Test: Should fail with root mismatch + blockPos := event.BlockPos + _, err = p.handleForwardLETEvent(tx, event, &blockPos) + require.Error(t, err) + require.Contains(t, err.Error(), "local exit root mismatch") + require.Contains(t, err.Error(), initialRoot.String()) + }) + + t.Run("error on new root mismatch", func(t *testing.T) { + p, tx := setupProcessorWithTransaction(t) + defer tx.Rollback() //nolint:errcheck + + // Setup: Insert initial leaves (indices 0-4) + var initialRoot common.Hash + var err error + // Insert block rows for initial leaves + for i := uint32(0); i <= 4; i++ { + _, err = tx.Exec(`INSERT INTO block (num) VALUES ($1)`, 10+uint64(i)) + require.NoError(t, err) + } + for i := uint32(0); i <= 4; i++ { + leaf := types.Leaf{Index: i, Hash: common.HexToHash(fmt.Sprintf("0x%d", i))} + initialRoot, err = p.exitTree.PutLeaf(tx, 10+uint64(i), 0, leaf) + require.NoError(t, err) + } + initialDepositCount := uint32(4) // Last index inserted + + // Insert block for the ForwardLET event + _, err = tx.Exec(`INSERT INTO block (num) VALUES ($1)`, uint64(100)) + require.NoError(t, err) + + // Create forward LET event + leaves := 
[]LeafData{ + { + LeafType: 1, + OriginNetwork: 5, + OriginAddress: common.HexToAddress("0x1234567890123456789012345678901234567890"), + DestinationNetwork: 10, + DestinationAddress: common.HexToAddress("0xabcdefabcdefabcdefabcdefabcdefabcdefabcd"), + Amount: big.NewInt(1000000), + Metadata: []byte("test"), + }, + } + encodedLeaves := encodeLeafDataArrayForTest(t, leaves) + + event := &ForwardLET{ + BlockNum: 100, + BlockPos: 5, + BlockTimestamp: 1234567890, + TxnHash: common.HexToHash("0xabc123"), + PreviousDepositCount: big.NewInt(int64(initialDepositCount)), + PreviousRoot: initialRoot, + NewDepositCount: big.NewInt(int64(initialDepositCount + 1)), + NewRoot: common.HexToHash("0xWRONG"), // Wrong new root + NewLeaves: encodedLeaves, + } + + // Test: Should fail with new root mismatch after processing + blockPos := event.BlockPos + _, err = p.handleForwardLETEvent(tx, event, &blockPos) + require.Error(t, err) + require.Contains(t, err.Error(), "local exit root mismatch") + }) + + t.Run("error on invalid encoded leaves", func(t *testing.T) { + p, tx := setupProcessorWithTransaction(t) + defer tx.Rollback() //nolint:errcheck + + // Setup: Insert initial leaves (indices 0-4) + var initialRoot common.Hash + var err error + for i := uint32(0); i <= 4; i++ { + leaf := types.Leaf{Index: i, Hash: common.HexToHash(fmt.Sprintf("0x%d", i))} + initialRoot, err = p.exitTree.PutLeaf(tx, 10+uint64(i), 0, leaf) + require.NoError(t, err) + } + initialDepositCount := uint32(4) // Last index inserted + + event := &ForwardLET{ + BlockNum: 100, + BlockPos: 5, + BlockTimestamp: 1234567890, + TxnHash: common.HexToHash("0xabc123"), + PreviousDepositCount: big.NewInt(int64(initialDepositCount)), + PreviousRoot: initialRoot, + NewDepositCount: big.NewInt(int64(initialDepositCount + 1)), + NewRoot: common.Hash{}, + NewLeaves: []byte("invalid data"), // Invalid encoding + } + + // Test: Should fail to decode leaves + blockPos := event.BlockPos + _, err = p.handleForwardLETEvent(tx, event, 
&blockPos) + require.Error(t, err) + require.Contains(t, err.Error(), "failed to decode new leaves") + }) + + t.Run("process with nil blockPos parameter", func(t *testing.T) { + p, tx := setupProcessorWithTransaction(t) + defer tx.Rollback() //nolint:errcheck + + // Setup: Insert initial leaves (indices 0-4) + var initialRoot common.Hash + var err error + // Insert block rows for initial leaves + for i := uint32(0); i <= 4; i++ { + _, err = tx.Exec(`INSERT INTO block (num) VALUES ($1)`, 10+uint64(i)) + require.NoError(t, err) + } + for i := uint32(0); i <= 4; i++ { + leaf := types.Leaf{Index: i, Hash: common.HexToHash(fmt.Sprintf("0x%d", i))} + initialRoot, err = p.exitTree.PutLeaf(tx, 10+uint64(i), 0, leaf) + require.NoError(t, err) + } + initialDepositCount := uint32(4) // Last index inserted + + // Insert block for the ForwardLET event + _, err = tx.Exec(`INSERT INTO block (num) VALUES ($1)`, uint64(100)) + require.NoError(t, err) + + leaves := []LeafData{ + { + LeafType: 1, + OriginNetwork: 5, + OriginAddress: common.HexToAddress("0x1234567890123456789012345678901234567890"), + DestinationNetwork: 10, + DestinationAddress: common.HexToAddress("0xabcdefabcdefabcdefabcdefabcdefabcdefabcd"), + Amount: big.NewInt(1000000), + Metadata: []byte("test"), + }, + } + encodedLeaves := encodeLeafDataArrayForTest(t, leaves) + + event := &ForwardLET{ + BlockNum: 100, + BlockPos: 5, + BlockTimestamp: 1234567890, + TxnHash: common.HexToHash("0xabc123"), + PreviousDepositCount: big.NewInt(int64(initialDepositCount)), + PreviousRoot: initialRoot, + NewDepositCount: big.NewInt(int64(initialDepositCount + 1)), + NewLeaves: encodedLeaves, + } + + // Calculate expected root using helper + event.NewRoot = calculateExpectedRootAfterForwardLET(t, initialDepositCount, leaves, event) + + // Test: Process with nil blockPos (should use event.BlockPos) + newBlockPos, err := p.handleForwardLETEvent(tx, event, nil) + require.NoError(t, err) + require.Equal(t, event.BlockPos+1, newBlockPos) + 
+ // Verify: Bridge uses event.BlockPos + var bridges []*Bridge + err = meddler.QueryAll(tx, &bridges, "SELECT * FROM bridge WHERE block_num = $1", event.BlockNum) + require.NoError(t, err) + require.Len(t, bridges, 1) + require.Equal(t, event.BlockPos, bridges[0].BlockPos) + }) +} + +// setupProcessorWithTransaction creates a processor and begins a transaction for testing +func setupProcessorWithTransaction(t *testing.T) (*processor, dbtypes.Txer) { + t.Helper() + + dbPath := filepath.Join(t.TempDir(), "test_forward_let.db") + err := migrations.RunMigrations(dbPath) + require.NoError(t, err) + + logger := log.WithFields("module", "test") + p, err := newProcessor(dbPath, "test", logger, dbQueryTimeout) + require.NoError(t, err) + + tx, err := db.NewTx(t.Context(), p.db) + require.NoError(t, err) + + return p, tx +} + +// calculateExpectedRootAfterForwardLET calculates what the tree root will be after processing ForwardLET leaves +// It does this using a completely separate processor to avoid affecting the test state +// archivedBridges: optional map from leaf index (in leaves slice) to archived bridge info +func calculateExpectedRootAfterForwardLET(t *testing.T, initialDepositCount uint32, + leaves []LeafData, event *ForwardLET, archivedBridges ...*Bridge) common.Hash { + t.Helper() + + // Build a map for quick lookup of archived bridge info by leaf data + archivedByLeaf := make(map[int]*Bridge) + for i, archived := range archivedBridges { + if archived != nil { + archivedByLeaf[i] = archived + } + } + + // Create a temporary processor with its own database + tempDBPath := filepath.Join(t.TempDir(), "temp_calc.db") + err := migrations.RunMigrations(tempDBPath) + require.NoError(t, err) + + logger := log.WithFields("module", "test-calc") + tempP, err := newProcessor(tempDBPath, "test-calc", logger, dbQueryTimeout) + require.NoError(t, err) + + tempTx, err := db.NewTx(t.Context(), tempP.db) + require.NoError(t, err) + defer tempTx.Rollback() //nolint:errcheck + + // 
Insert block rows for the setup leaves + for i := uint32(0); i <= initialDepositCount; i++ { + _, err = tempTx.Exec(`INSERT INTO block (num) VALUES ($1)`, 10+uint64(i)) + require.NoError(t, err) + } + + // Insert block row for the ForwardLET event + _, err = tempTx.Exec(`INSERT INTO block (num) VALUES ($1)`, event.BlockNum) + require.NoError(t, err) + + // Insert archived bridges if provided + for _, archived := range archivedBridges { + if archived != nil { + _, err = tempTx.Exec(`INSERT INTO block (num) VALUES ($1)`, archived.BlockNum) + require.NoError(t, err) + + _, err = tempTx.Exec(` + INSERT INTO bridge_archive ( + block_num, block_pos, leaf_type, origin_network, origin_address, + destination_network, destination_address, amount, metadata, deposit_count, + tx_hash, block_timestamp, from_address, txn_sender + ) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, 0, $12, $13) + `, archived.BlockNum, archived.BlockPos, archived.LeafType, + archived.OriginNetwork, archived.OriginAddress.Hex(), + archived.DestinationNetwork, archived.DestinationAddress.Hex(), + archived.Amount.String(), archived.Metadata, archived.DepositCount, + archived.TxHash.Hex(), archived.FromAddress.Hex(), archived.TxnSender.Hex()) + require.NoError(t, err) + } + } + + // Rebuild tree state up to initialDepositCount + for i := uint32(0); i <= initialDepositCount; i++ { + leaf := types.Leaf{Index: i, Hash: common.HexToHash(fmt.Sprintf("0x%d", i))} + _, err = tempP.exitTree.PutLeaf(tempTx, 10+uint64(i), 0, leaf) + require.NoError(t, err) + } + + // Now add the ForwardLET leaves (will query for archived bridges) + currentDepositCount := initialDepositCount + 1 + var newRoot common.Hash + for i, leaf := range leaves { + // Try to get archived bridge info if available + var txHash common.Hash + var txnSender, fromAddr common.Address + if archived, found := archivedByLeaf[i]; found { + txHash = archived.TxHash + txnSender = archived.TxnSender + fromAddr = archived.FromAddress + } else { + 
txHash = event.TxnHash + // txnSender and fromAddr remain zero + } + + bridge := leaf.ToBridge( + event.BlockNum, + event.BlockPos+uint64(i), + event.BlockTimestamp, + currentDepositCount, + txHash, + txnSender, + fromAddr, + ) + newRoot, err = tempP.exitTree.PutLeaf(tempTx, event.BlockNum, event.BlockPos+uint64(i), types.Leaf{ + Index: currentDepositCount, + Hash: bridge.Hash(), + }) + require.NoError(t, err) + currentDepositCount++ + } + + return newRoot +} + +// encodeLeafDataArrayForTest encodes a slice of LeafData using ABI encoding +func encodeLeafDataArrayForTest(t *testing.T, leaves []LeafData) []byte { + t.Helper() + + encodedBytes, err := aggkitabi.EncodeABIStructArray(leaves) + require.NoError(t, err) + + return encodedBytes +} From 935dda19633abbbfc3299788056d3cbc6a8ed92a Mon Sep 17 00:00:00 2001 From: Goran Rojovic Date: Tue, 16 Dec 2025 09:08:26 +0100 Subject: [PATCH 44/73] fix: copilot comments --- bridgesync/backfill_tx_sender.go | 14 +++++++------- bridgesync/migrations/bridgesync0012.sql | 6 +++--- bridgesync/processor.go | 2 +- bridgesync/processor_test.go | 2 ++ 4 files changed, 13 insertions(+), 11 deletions(-) diff --git a/bridgesync/backfill_tx_sender.go b/bridgesync/backfill_tx_sender.go index 29798fa51..ef2864345 100644 --- a/bridgesync/backfill_tx_sender.go +++ b/bridgesync/backfill_tx_sender.go @@ -164,14 +164,14 @@ func (b *BackfillTxnSender) getRecordsNeedingBackfillCount(ctx context.Context, SELECT COUNT(*) FROM %s WHERE (txn_sender = '' OR txn_sender IS NULL OR from_address = '' OR from_address IS NULL) - AND (source IS NULL OR (source != '%s' AND source != '%s')) - `, tableName, BridgeSourceBackwardLET, BridgeSourceForwardLET) + AND (source IS NULL OR (source != $1 AND source != $2)) + `, tableName) var count int dbCtx, cancel := context.WithTimeout(ctx, b.dbTimeout) defer cancel() - err := b.db.QueryRowContext(dbCtx, query).Scan(&count) + err := b.db.QueryRowContext(dbCtx, query, BridgeSourceBackwardLET, 
BridgeSourceForwardLET).Scan(&count) if err != nil { return 0, fmt.Errorf("failed to count records needing backfill: %w", err) } @@ -190,13 +190,13 @@ func (b *BackfillTxnSender) getRecordsNeedingBackfill( SELECT * FROM %s WHERE (txn_sender = '' OR txn_sender IS NULL OR from_address = '' OR from_address IS NULL) - AND (source IS NULL OR (source != '%s' AND source != '%s')) - LIMIT $1 - `, tableName, BridgeSourceBackwardLET, BridgeSourceForwardLET) + AND (source IS NULL OR (source != $1 AND source != $2)) + LIMIT $3 + `, tableName) dbCtx, cancel := context.WithTimeout(ctx, b.dbTimeout) defer cancel() - rows, err := b.db.QueryContext(dbCtx, query, limit) + rows, err := b.db.QueryContext(dbCtx, query, BridgeSourceBackwardLET, BridgeSourceForwardLET, limit) if err != nil { return nil, fmt.Errorf("failed to query records needing backfill: %w", err) } diff --git a/bridgesync/migrations/bridgesync0012.sql b/bridgesync/migrations/bridgesync0012.sql index 516bee88c..eb94eabbd 100644 --- a/bridgesync/migrations/bridgesync0012.sql +++ b/bridgesync/migrations/bridgesync0012.sql @@ -28,9 +28,9 @@ CREATE TABLE IF NOT EXISTS backward_let ( ALTER TABLE bridge ADD COLUMN source TEXT DEFAULT ''; CREATE TABLE IF NOT EXISTS forward_let ( - block_num INTEGER NOT NULL REFERENCES block (num) ON DELETE CASCADE, - block_pos INTEGER NOT NULL, - block_timestamp INTEGER NOT NULL, + block_num INTEGER NOT NULL REFERENCES block (num) ON DELETE CASCADE, + block_pos INTEGER NOT NULL, + block_timestamp INTEGER NOT NULL, tx_hash VARCHAR NOT NULL, previous_deposit_count TEXT NOT NULL, previous_root VARCHAR NOT NULL, diff --git a/bridgesync/processor.go b/bridgesync/processor.go index b94803540..cdf1f7bc9 100644 --- a/bridgesync/processor.go +++ b/bridgesync/processor.go @@ -1833,7 +1833,7 @@ func (p *processor) handleForwardLETEvent(tx dbtypes.Txer, event *ForwardLET, bl // after processing all new leaves, we sanity check that the new root matches the latest one in the exit tree if err := 
p.sanityCheckLatestLER(tx, event.NewRoot); err != nil { - return 0, fmt.Errorf("failed to sanity check LER before processing ForwardLET: %w", err) + return 0, fmt.Errorf("failed to sanity check LER after processing ForwardLET: %w", err) } // finally, insert the forward LET event into the designated table diff --git a/bridgesync/processor_test.go b/bridgesync/processor_test.go index 391baee7c..d8878460e 100644 --- a/bridgesync/processor_test.go +++ b/bridgesync/processor_test.go @@ -486,6 +486,8 @@ var ( BlockPos: 4, PreviousDepositCount: big.NewInt(3), NewDepositCount: big.NewInt(2), + PreviousRoot: common.HexToHash("0x15cd4b94cacc2cf50d055e1adb5fbfe5cd95485e121a5c411d73e263f2a66685"), + NewRoot: common.HexToHash("0xa03113d9ce128863f29479689c82d0b37ebc9432c569c3a57f22d6c008256c5b"), }}, }, } From 37f4c016264ce31e26ba015ec0109bc3c5202c82 Mon Sep 17 00:00:00 2001 From: Goran Rojovic Date: Tue, 16 Dec 2025 09:46:24 +0100 Subject: [PATCH 45/73] fix: add appender test --- bridgesync/downloader_test.go | 27 +++++++++++++++++++++++++++ 1 file changed, 27 insertions(+) diff --git a/bridgesync/downloader_test.go b/bridgesync/downloader_test.go index 3859d0586..f1d56db4b 100644 --- a/bridgesync/downloader_test.go +++ b/bridgesync/downloader_test.go @@ -662,6 +662,33 @@ func TestBuildAppender(t *testing.T) { return l, nil }, }, + { + name: "forwardLETSignature appender", + eventSignature: forwardLETEventSignature, + deploymentKind: SovereignChain, + logBuilder: func() (types.Log, error) { + event, err := bridgeL2Abi.EventByID(forwardLETEventSignature) + if err != nil { + return types.Log{}, err + } + + previousDepositCount := big.NewInt(15) + previousRoot := common.HexToHash("0xdeadbeef15") + newDepositCount := big.NewInt(20) + newRoot := common.HexToHash("0x5ca1e20") + newLeaves := []byte("leavesdata") + data, err := event.Inputs.Pack(previousDepositCount, previousRoot, newDepositCount, newRoot, newLeaves) + if err != nil { + return types.Log{}, err + } + + l := types.Log{ + 
Topics: []common.Hash{forwardLETEventSignature}, + Data: data, + } + return l, nil + }, + }, { name: "unknown deployment kind", deploymentKind: 100, From e6ec119e9af2fbbaacb0179b11f32e1e719ffabb Mon Sep 17 00:00:00 2001 From: Goran Rojovic Date: Tue, 16 Dec 2025 10:38:51 +0100 Subject: [PATCH 46/73] feat: new tests --- abi/abi_decode_test.go | 276 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 276 insertions(+) create mode 100644 abi/abi_decode_test.go diff --git a/abi/abi_decode_test.go b/abi/abi_decode_test.go new file mode 100644 index 000000000..3d9052ad3 --- /dev/null +++ b/abi/abi_decode_test.go @@ -0,0 +1,276 @@ +package abi + +import ( + "errors" + "math/big" + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/require" +) + +func TestDecodeABIEncodedStructArray(t *testing.T) { + type TestStruct struct { + Field1 uint8 `abiarg:"field1"` + Field2 uint32 `abiarg:"field2"` + Field3 common.Address `abiarg:"field3"` + } + + // Create test data + items := []TestStruct{ + { + Field1: 1, + Field2: 100, + Field3: common.HexToAddress("0x1111111111111111111111111111111111111111"), + }, + { + Field1: 2, + Field2: 200, + Field3: common.HexToAddress("0x2222222222222222222222222222222222222222"), + }, + } + + // Encode first + encodedBytes, err := EncodeABIStructArray(items) + require.NoError(t, err) + require.NotEmpty(t, encodedBytes) + + // Decode with converter + converter := func(item any) (TestStruct, error) { + // The ABI library returns anonymous structs, we need to extract fields + // In real usage, you'd use reflection or type assertions + return TestStruct{ + Field1: 1, // Placeholder for test + Field2: 100, + Field3: common.HexToAddress("0x1111111111111111111111111111111111111111"), + }, nil + } + + decoded, err := DecodeABIEncodedStructArray(encodedBytes, converter) + require.NoError(t, err) + require.Len(t, decoded, 2) +} + +func TestDecodeABIEncodedStructArray_EmptyBytes(t *testing.T) { + type TestStruct struct { 
+ Field1 uint8 `abiarg:"field1"` + } + + converter := func(item any) (TestStruct, error) { + return TestStruct{}, nil + } + + _, err := DecodeABIEncodedStructArray([]byte{}, converter) + require.Error(t, err) + require.Contains(t, err.Error(), "encoded bytes are empty") +} + +func TestDecodeABIEncodedStructArray_ConverterError(t *testing.T) { + type TestStruct struct { + Field1 uint8 `abiarg:"field1"` + Field2 uint32 `abiarg:"field2"` + } + + // Create test data + items := []TestStruct{ + {Field1: 1, Field2: 100}, + {Field1: 2, Field2: 200}, + } + + encodedBytes, err := EncodeABIStructArray(items) + require.NoError(t, err) + + // Converter that always fails + converter := func(item any) (TestStruct, error) { + return TestStruct{}, errors.New("converter failed") + } + + _, err = DecodeABIEncodedStructArray(encodedBytes, converter) + require.Error(t, err) + require.Contains(t, err.Error(), "failed to convert item 0") + require.Contains(t, err.Error(), "converter failed") +} + +func TestDecodeABIEncodedStructArray_WithBigInt(t *testing.T) { + type TestStruct struct { + Amount *big.Int `abiarg:"amount,uint256"` + Value uint32 `abiarg:"value"` + } + + // Create test data + items := []TestStruct{ + {Amount: big.NewInt(1000), Value: 1}, + {Amount: big.NewInt(2000), Value: 2}, + {Amount: big.NewInt(3000), Value: 3}, + } + + encodedBytes, err := EncodeABIStructArray(items) + require.NoError(t, err) + + // Converter that extracts fields + converter := func(item any) (TestStruct, error) { + // In real usage, you'd use reflection to extract the fields + return TestStruct{Amount: big.NewInt(1000), Value: 1}, nil + } + + decoded, err := DecodeABIEncodedStructArray(encodedBytes, converter) + require.NoError(t, err) + require.Len(t, decoded, 3) +} + +func TestDecodeABIEncodedStructArray_EmptyArray(t *testing.T) { + type TestStruct struct { + Field1 uint8 `abiarg:"field1"` + } + + // Encode empty array + items := []TestStruct{} + encodedBytes, err := EncodeABIStructArray(items) + 
require.NoError(t, err) + + converter := func(item any) (TestStruct, error) { + return TestStruct{}, nil + } + + decoded, err := DecodeABIEncodedStructArray(encodedBytes, converter) + require.NoError(t, err) + require.Len(t, decoded, 0) +} + +func TestDecodeABIEncodedStructArray_InvalidABIData(t *testing.T) { + type TestStruct struct { + Field1 uint8 `abiarg:"field1"` + } + + converter := func(item any) (TestStruct, error) { + return TestStruct{}, nil + } + + // Invalid ABI encoded data + invalidData := []byte{0x01, 0x02, 0x03} + + _, err := DecodeABIEncodedStructArray(invalidData, converter) + require.Error(t, err) + require.Contains(t, err.Error(), "failed to unpack data") +} + +func TestDecodeABIEncodedStructArray_ComplexStruct(t *testing.T) { + type ComplexStruct struct { + LeafType uint8 `abiarg:"leafType"` + OriginNetwork uint32 `abiarg:"originNetwork"` + OriginAddress common.Address `abiarg:"originAddress"` + DestinationNetwork uint32 `abiarg:"destinationNetwork"` + DestinationAddress common.Address `abiarg:"destinationAddress"` + Amount *big.Int `abiarg:"amount,uint256"` + Metadata []byte `abiarg:"metadata"` + } + + // Create test data + items := []ComplexStruct{ + { + LeafType: 1, + OriginNetwork: 1, + OriginAddress: common.HexToAddress("0x1111111111111111111111111111111111111111"), + DestinationNetwork: 2, + DestinationAddress: common.HexToAddress("0x2222222222222222222222222222222222222222"), + Amount: big.NewInt(1000), + Metadata: []byte("test1"), + }, + { + LeafType: 2, + OriginNetwork: 3, + OriginAddress: common.HexToAddress("0x3333333333333333333333333333333333333333"), + DestinationNetwork: 4, + DestinationAddress: common.HexToAddress("0x4444444444444444444444444444444444444444"), + Amount: big.NewInt(2000), + Metadata: []byte("test2"), + }, + } + + encodedBytes, err := EncodeABIStructArray(items) + require.NoError(t, err) + + converter := func(item any) (ComplexStruct, error) { + // Placeholder converter for test + return ComplexStruct{ + LeafType: 
1, + OriginNetwork: 1, + Amount: big.NewInt(1000), + }, nil + } + + decoded, err := DecodeABIEncodedStructArray(encodedBytes, converter) + require.NoError(t, err) + require.Len(t, decoded, 2) +} + +func TestDecodeABIEncodedStructArray_NoABITags(t *testing.T) { + type BadStruct struct { + Field1 uint8 + Field2 uint32 + } + + converter := func(item any) (BadStruct, error) { + return BadStruct{}, nil + } + + // Try to decode with a struct that has no abiarg tags + // BuildABIFields will succeed but return empty fields, which will cause unpack to fail + _, err := DecodeABIEncodedStructArray([]byte{0x01}, converter) + require.Error(t, err) + // The error will be from unpacking due to insufficient data or empty ABI fields + require.Contains(t, err.Error(), "failed to") +} + +func TestDecodeABIEncodedStructArray_SingleItem(t *testing.T) { + type TestStruct struct { + Value uint64 `abiarg:"value"` + } + + // Create single item array + items := []TestStruct{ + {Value: 12345}, + } + + encodedBytes, err := EncodeABIStructArray(items) + require.NoError(t, err) + + converter := func(item any) (TestStruct, error) { + return TestStruct{Value: 12345}, nil + } + + decoded, err := DecodeABIEncodedStructArray(encodedBytes, converter) + require.NoError(t, err) + require.Len(t, decoded, 1) + require.Equal(t, uint64(12345), decoded[0].Value) +} + +func TestDecodeABIEncodedStructArray_ConverterPartialFailure(t *testing.T) { + type TestStruct struct { + Field1 uint8 `abiarg:"field1"` + Field2 uint32 `abiarg:"field2"` + } + + items := []TestStruct{ + {Field1: 1, Field2: 100}, + {Field1: 2, Field2: 200}, + {Field1: 3, Field2: 300}, + } + + encodedBytes, err := EncodeABIStructArray(items) + require.NoError(t, err) + + callCount := 0 + converter := func(item any) (TestStruct, error) { + callCount++ + if callCount == 2 { + return TestStruct{}, errors.New("failed on item 2") + } + return TestStruct{Field1: 1, Field2: 100}, nil + } + + _, err = DecodeABIEncodedStructArray(encodedBytes, 
converter) + require.Error(t, err) + require.Contains(t, err.Error(), "failed to convert item 1") + require.Contains(t, err.Error(), "failed on item 2") +} From 45aed70d931aa5ca87d55d6f88f0e5cb17b132cf Mon Sep 17 00:00:00 2001 From: Goran Rojovic Date: Thu, 18 Dec 2025 10:38:50 +0100 Subject: [PATCH 47/73] fix: rebase --- bridgesync/backfill_tx_sender.go | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/bridgesync/backfill_tx_sender.go b/bridgesync/backfill_tx_sender.go index ef2864345..a916715f6 100644 --- a/bridgesync/backfill_tx_sender.go +++ b/bridgesync/backfill_tx_sender.go @@ -171,7 +171,8 @@ func (b *BackfillTxnSender) getRecordsNeedingBackfillCount(ctx context.Context, dbCtx, cancel := context.WithTimeout(ctx, b.dbTimeout) defer cancel() - err := b.db.QueryRowContext(dbCtx, query, BridgeSourceBackwardLET, BridgeSourceForwardLET).Scan(&count) + err := b.db.QueryRowContext(dbCtx, query, + BridgeSourceRestoredBackwardLET, BridgeSourceForwardLET).Scan(&count) if err != nil { return 0, fmt.Errorf("failed to count records needing backfill: %w", err) } @@ -196,7 +197,8 @@ func (b *BackfillTxnSender) getRecordsNeedingBackfill( dbCtx, cancel := context.WithTimeout(ctx, b.dbTimeout) defer cancel() - rows, err := b.db.QueryContext(dbCtx, query, BridgeSourceBackwardLET, BridgeSourceForwardLET, limit) + rows, err := b.db.QueryContext(dbCtx, query, + BridgeSourceRestoredBackwardLET, BridgeSourceForwardLET, limit) if err != nil { return nil, fmt.Errorf("failed to query records needing backfill: %w", err) } From 2bde87b69426526b7435f8924e994d17fc577d66 Mon Sep 17 00:00:00 2001 From: Goran Rojovic Date: Thu, 18 Dec 2025 11:37:08 +0100 Subject: [PATCH 48/73] fix: tests --- bridgesync/backfill_tx_sender_test.go | 2 +- bridgesync/processor.go | 14 +++++++++++--- bridgesync/processor_forward_let_test.go | 24 ++---------------------- bridgesync/processor_test.go | 14 ++++++++++++++ 4 files changed, 28 insertions(+), 26 deletions(-) diff --git 
a/bridgesync/backfill_tx_sender_test.go b/bridgesync/backfill_tx_sender_test.go index 216700f10..d5a711ca3 100644 --- a/bridgesync/backfill_tx_sender_test.go +++ b/bridgesync/backfill_tx_sender_test.go @@ -458,7 +458,7 @@ func TestBackfillTxnSender_getRecordsNeedingBackfillCount(t *testing.T) { 2, 0, 1, 1, '0x1234567890123456789012345678901234567890', 2, '0x0987654321098765432109876543210987654321', '1000000000000000000', '', 2, '0xabcdef1234567890abcdef1234567890abcdef1234567890abcdef1234567891', - 1234567890, '', '', 'backward_let' + 1234567890, '', '', 'restored_backward_let' ) `) require.NoError(t, err) diff --git a/bridgesync/processor.go b/bridgesync/processor.go index cdf1f7bc9..957149889 100644 --- a/bridgesync/processor.go +++ b/bridgesync/processor.go @@ -1721,13 +1721,21 @@ func (p *processor) archiveAndDeleteBridgesAbove(ctx context.Context, tx dbtypes // sanityCheckLatestLER checks if the provided local exit root matches the latest one in the exit tree func (p *processor) sanityCheckLatestLER(tx dbtypes.Txer, ler common.Hash) error { + var lastRootHash common.Hash + root, err := p.exitTree.GetLastRoot(tx) if err != nil { - return fmt.Errorf("failed to get last root from exit tree: %w", err) + // if there is no root yet, we consider the zero hash as the last root + if !errors.Is(err, db.ErrNotFound) { + return fmt.Errorf("failed to get last root from exit tree: %w", err) + } + } else { + lastRootHash = root.Hash } - if root.Hash != ler { + + if lastRootHash != ler { return fmt.Errorf("local exit root mismatch: expected %s, got %s", - ler.String(), root.Hash.String()) + ler.String(), lastRootHash.String()) } return nil } diff --git a/bridgesync/processor_forward_let_test.go b/bridgesync/processor_forward_let_test.go index db04286a5..b5c92cfc2 100644 --- a/bridgesync/processor_forward_let_test.go +++ b/bridgesync/processor_forward_let_test.go @@ -243,17 +243,7 @@ func TestHandleForwardLETEvent(t *testing.T) { // Don't set Source - bridge_archive table 
doesn't have this column } // Insert manually to avoid Source field - _, err = tx.Exec(` - INSERT INTO bridge_archive ( - block_num, block_pos, leaf_type, origin_network, origin_address, - destination_network, destination_address, amount, metadata, deposit_count, - tx_hash, block_timestamp, from_address, txn_sender - ) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, 0, $12, $13) - `, archivedBridge.BlockNum, archivedBridge.BlockPos, archivedBridge.LeafType, - archivedBridge.OriginNetwork, archivedBridge.OriginAddress, - archivedBridge.DestinationNetwork, archivedBridge.DestinationAddress, - archivedBridge.Amount.String(), archivedBridge.Metadata, archivedBridge.DepositCount, - archivedBridge.TxHash.Hex(), archivedBridge.FromAddress.Hex(), archivedBridge.TxnSender.Hex()) + err = meddler.Insert(tx, "bridge_archive", archivedBridge) require.NoError(t, err) // Create forward LET event with matching leaf @@ -371,17 +361,7 @@ func TestHandleForwardLETEvent(t *testing.T) { // Insert both archived bridges manually (to avoid Source column) for _, archived := range []*Bridge{archivedBridge1, archivedBridge2} { - _, err = tx.Exec(` - INSERT INTO bridge_archive ( - block_num, block_pos, leaf_type, origin_network, origin_address, - destination_network, destination_address, amount, metadata, deposit_count, - tx_hash, block_timestamp, from_address, txn_sender - ) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, 0, $12, $13) - `, archived.BlockNum, archived.BlockPos, archived.LeafType, - archived.OriginNetwork, archived.OriginAddress, - archived.DestinationNetwork, archived.DestinationAddress, - archived.Amount.String(), archived.Metadata, archived.DepositCount, - archived.TxHash.Hex(), archived.FromAddress.Hex(), archived.TxnSender.Hex()) + err = meddler.Insert(tx, "bridge_archive", archived) require.NoError(t, err) } diff --git a/bridgesync/processor_test.go b/bridgesync/processor_test.go index d8878460e..70d641071 100644 --- a/bridgesync/processor_test.go +++ 
b/bridgesync/processor_test.go @@ -5446,6 +5446,8 @@ func TestProcessor_BackwardLET(t *testing.T) { BlockPos: 0, PreviousDepositCount: big.NewInt(3), NewDepositCount: big.NewInt(2), + PreviousRoot: common.HexToHash("0x9ba667158a062be548e5c1b2e8a9a2ad03b693e562535b0723880627c6664b02"), + NewRoot: common.HexToHash("0xa9d31ebbb97c7cd7c7103bee8af7d0b4c83771939baba0b415b0f94c4c39fd84"), }}, }, }) @@ -5468,6 +5470,8 @@ func TestProcessor_BackwardLET(t *testing.T) { BlockPos: 0, PreviousDepositCount: big.NewInt(5), NewDepositCount: big.NewInt(0), + PreviousRoot: common.HexToHash("0x9ba667158a062be548e5c1b2e8a9a2ad03b693e562535b0723880627c6664b02"), + NewRoot: common.HexToHash("0x283c52c3d10a22d01f95f5bcab5e823675c9855bd40b1e82f32b0437b3b6a446"), }}, }, }) @@ -5513,6 +5517,8 @@ func TestProcessor_BackwardLET(t *testing.T) { BlockPos: 0, PreviousDepositCount: big.NewInt(5), NewDepositCount: big.NewInt(2), + PreviousRoot: common.HexToHash("0x9ba667158a062be548e5c1b2e8a9a2ad03b693e562535b0723880627c6664b02"), + NewRoot: common.HexToHash("0xa9d31ebbb97c7cd7c7103bee8af7d0b4c83771939baba0b415b0f94c4c39fd84"), }}, }, } @@ -5538,6 +5544,8 @@ func TestProcessor_BackwardLET(t *testing.T) { BlockPos: 0, PreviousDepositCount: big.NewInt(5), NewDepositCount: big.NewInt(3), + PreviousRoot: common.HexToHash("0x9ba667158a062be548e5c1b2e8a9a2ad03b693e562535b0723880627c6664b02"), + NewRoot: common.HexToHash("0x7533c9ef58edd0bea7959a20c33ed47e5548d35f4ff140c5c915740fe6800fb8"), }}, }, }) @@ -5550,6 +5558,8 @@ func TestProcessor_BackwardLET(t *testing.T) { BlockPos: 0, PreviousDepositCount: big.NewInt(4), NewDepositCount: big.NewInt(3), + PreviousRoot: common.HexToHash("0x7533c9ef58edd0bea7959a20c33ed47e5548d35f4ff140c5c915740fe6800fb8"), + NewRoot: common.HexToHash("0x7533c9ef58edd0bea7959a20c33ed47e5548d35f4ff140c5c915740fe6800fb8"), }}, }, }) @@ -5629,6 +5639,8 @@ func TestProcessor_BackwardLET(t *testing.T) { BlockPos: 0, PreviousDepositCount: big.NewInt(5), NewDepositCount: 
big.NewInt(2), + PreviousRoot: common.HexToHash("0x9ba667158a062be548e5c1b2e8a9a2ad03b693e562535b0723880627c6664b02"), + NewRoot: common.HexToHash("0xa9d31ebbb97c7cd7c7103bee8af7d0b4c83771939baba0b415b0f94c4c39fd84"), }}, }, } @@ -5653,6 +5665,8 @@ func TestProcessor_BackwardLET(t *testing.T) { BlockPos: 0, PreviousDepositCount: big.NewInt(5), NewDepositCount: big.NewInt(2), + PreviousRoot: common.HexToHash("0x9ba667158a062be548e5c1b2e8a9a2ad03b693e562535b0723880627c6664b02"), + NewRoot: common.HexToHash("0xa9d31ebbb97c7cd7c7103bee8af7d0b4c83771939baba0b415b0f94c4c39fd84"), }}, }, } From 318e673993e1f6bdaa2ea597585964daf4d004e5 Mon Sep 17 00:00:00 2001 From: Goran Rojovic Date: Tue, 23 Dec 2025 11:28:36 +0100 Subject: [PATCH 49/73] fix: rebase --- bridgesync/backfill_tx_sender.go | 4 ++-- bridgesync/backfill_tx_sender_test.go | 2 +- bridgesync/processor_test.go | 2 ++ 3 files changed, 5 insertions(+), 3 deletions(-) diff --git a/bridgesync/backfill_tx_sender.go b/bridgesync/backfill_tx_sender.go index a916715f6..30f5044ad 100644 --- a/bridgesync/backfill_tx_sender.go +++ b/bridgesync/backfill_tx_sender.go @@ -172,7 +172,7 @@ func (b *BackfillTxnSender) getRecordsNeedingBackfillCount(ctx context.Context, defer cancel() err := b.db.QueryRowContext(dbCtx, query, - BridgeSourceRestoredBackwardLET, BridgeSourceForwardLET).Scan(&count) + BridgeSourceBackwardLET, BridgeSourceForwardLET).Scan(&count) if err != nil { return 0, fmt.Errorf("failed to count records needing backfill: %w", err) } @@ -198,7 +198,7 @@ func (b *BackfillTxnSender) getRecordsNeedingBackfill( dbCtx, cancel := context.WithTimeout(ctx, b.dbTimeout) defer cancel() rows, err := b.db.QueryContext(dbCtx, query, - BridgeSourceRestoredBackwardLET, BridgeSourceForwardLET, limit) + BridgeSourceBackwardLET, BridgeSourceForwardLET, limit) if err != nil { return nil, fmt.Errorf("failed to query records needing backfill: %w", err) } diff --git a/bridgesync/backfill_tx_sender_test.go 
b/bridgesync/backfill_tx_sender_test.go index d5a711ca3..216700f10 100644 --- a/bridgesync/backfill_tx_sender_test.go +++ b/bridgesync/backfill_tx_sender_test.go @@ -458,7 +458,7 @@ func TestBackfillTxnSender_getRecordsNeedingBackfillCount(t *testing.T) { 2, 0, 1, 1, '0x1234567890123456789012345678901234567890', 2, '0x0987654321098765432109876543210987654321', '1000000000000000000', '', 2, '0xabcdef1234567890abcdef1234567890abcdef1234567890abcdef1234567891', - 1234567890, '', '', 'restored_backward_let' + 1234567890, '', '', 'backward_let' ) `) require.NoError(t, err) diff --git a/bridgesync/processor_test.go b/bridgesync/processor_test.go index 70d641071..2b00b9783 100644 --- a/bridgesync/processor_test.go +++ b/bridgesync/processor_test.go @@ -5494,6 +5494,8 @@ func TestProcessor_BackwardLET(t *testing.T) { BlockPos: 0, PreviousDepositCount: big.NewInt(5), NewDepositCount: big.NewInt(4), + PreviousRoot: common.HexToHash("0x9ba667158a062be548e5c1b2e8a9a2ad03b693e562535b0723880627c6664b02"), + NewRoot: common.HexToHash("0x44e1bf8449ecec2b8b1d123fab00d33c9acb308e590605adf5f6e2de4d1c1133"), }}, }, } From faf64b8da3a93be3d7c0c5cf9ad4ff3cc317f8a0 Mon Sep 17 00:00:00 2001 From: Goran Rojovic Date: Fri, 26 Dec 2025 11:20:05 +0100 Subject: [PATCH 50/73] fix: comments --- abi/abi_builder.go | 21 +- abi/abi_builder_test.go | 52 +- abi/abi_decode.go | 30 +- abi/abi_decode_test.go | 153 +--- abi/abi_encode.go | 2 +- abi/abi_encode_test.go | 20 +- bridgesync/abi.go | 132 ---- bridgesync/leaf_data.go | 65 ++ bridgesync/{abi_test.go => leaf_data_test.go} | 0 bridgesync/processor_forward_let_test.go | 734 ------------------ bridgesync/processor_test.go | 718 +++++++++++++++++ 11 files changed, 837 insertions(+), 1090 deletions(-) delete mode 100644 bridgesync/abi.go create mode 100644 bridgesync/leaf_data.go rename bridgesync/{abi_test.go => leaf_data_test.go} (100%) delete mode 100644 bridgesync/processor_forward_let_test.go diff --git a/abi/abi_builder.go b/abi/abi_builder.go 
index e96ed2ced..25da2f499 100644 --- a/abi/abi_builder.go +++ b/abi/abi_builder.go @@ -27,30 +27,23 @@ func BuildABIFields(structType any) ([]abi.ArgumentMarshaling, error) { for i := 0; i < t.NumField(); i++ { field := t.Field(i) - abiTag := field.Tag.Get("abiarg") + abiTag := field.Tag.Get("abi") if abiTag == "" { - continue // Skip fields without abiarg tag + continue // Skip fields without abi tag } parts := strings.Split(abiTag, ",") name := parts[0] - var abiType string - if len(parts) > 1 { - // Explicit type from tag - abiType = parts[1] - } else { - // Infer type from Go type - inferredType, err := inferABIType(field.Type) - if err != nil { - return nil, fmt.Errorf("field %s: %w", field.Name, err) - } - abiType = inferredType + // Infer type from Go type + inferredType, err := inferABIType(field.Type) + if err != nil { + return nil, fmt.Errorf("field %s: %w", field.Name, err) } fields = append(fields, abi.ArgumentMarshaling{ Name: name, - Type: abiType, + Type: inferredType, }) } diff --git a/abi/abi_builder_test.go b/abi/abi_builder_test.go index ecf920dd6..464a70e4c 100644 --- a/abi/abi_builder_test.go +++ b/abi/abi_builder_test.go @@ -11,11 +11,11 @@ import ( func TestBuildABIFields(t *testing.T) { type TestStruct struct { - Field1 uint8 `abiarg:"field1"` - Field2 uint32 `abiarg:"field2"` - Field3 common.Address `abiarg:"field3"` - Field4 *big.Int `abiarg:"field4,uint256"` - Field5 []byte `abiarg:"field5"` + Field1 uint8 `abi:"field1"` + Field2 uint32 `abi:"field2"` + Field3 common.Address `abi:"field3"` + Field4 *big.Int `abi:"field4"` + Field5 []byte `abi:"field5"` Field6 string // No tag, should be skipped } @@ -34,42 +34,6 @@ func TestBuildABIFields(t *testing.T) { require.Equal(t, expected, fields) } -func TestBuildABIFields_TypeInference(t *testing.T) { - type TestStruct struct { - Uint8Field uint8 `abiarg:"uint8Field"` - Uint16Field uint16 `abiarg:"uint16Field"` - Uint32Field uint32 `abiarg:"uint32Field"` - Uint64Field uint64 
`abiarg:"uint64Field"` - BoolField bool `abiarg:"boolField"` - StringField string `abiarg:"stringField"` - BytesField []byte `abiarg:"bytesField"` - AddressField common.Address `abiarg:"addressField"` - HashField common.Hash `abiarg:"hashField"` - BigIntField *big.Int `abiarg:"bigIntField"` // Inferred as uint256 - BigIntExplict *big.Int `abiarg:"bigIntExplict,uint128"` - } - - fields, err := BuildABIFields(TestStruct{}) - require.NoError(t, err) - require.Len(t, fields, 11) - - expected := []abi.ArgumentMarshaling{ - {Name: "uint8Field", Type: "uint8"}, - {Name: "uint16Field", Type: "uint16"}, - {Name: "uint32Field", Type: "uint32"}, - {Name: "uint64Field", Type: "uint64"}, - {Name: "boolField", Type: "bool"}, - {Name: "stringField", Type: "string"}, - {Name: "bytesField", Type: "bytes"}, - {Name: "addressField", Type: "address"}, - {Name: "hashField", Type: "bytes32"}, - {Name: "bigIntField", Type: "uint256"}, - {Name: "bigIntExplict", Type: "uint128"}, - } - - require.Equal(t, expected, fields) -} - func TestBuildABIFields_ErrorCases(t *testing.T) { t.Run("non-struct type", func(t *testing.T) { _, err := BuildABIFields(42) @@ -79,7 +43,7 @@ func TestBuildABIFields_ErrorCases(t *testing.T) { t.Run("unsupported field type", func(t *testing.T) { type BadStruct struct { - InvalidField map[string]string `abiarg:"invalid"` + InvalidField map[string]string `abi:"invalid"` } _, err := BuildABIFields(BadStruct{}) require.Error(t, err) @@ -89,8 +53,8 @@ func TestBuildABIFields_ErrorCases(t *testing.T) { func TestBuildABIFields_WithPointer(t *testing.T) { type TestStruct struct { - Field1 uint8 `abiarg:"field1"` - Field2 uint32 `abiarg:"field2"` + Field1 uint8 `abi:"field1"` + Field2 uint32 `abi:"field2"` } // Test with pointer to struct diff --git a/abi/abi_decode.go b/abi/abi_decode.go index 5bf372c8e..17447d118 100644 --- a/abi/abi_decode.go +++ b/abi/abi_decode.go @@ -3,17 +3,15 @@ package abi import ( "errors" "fmt" - "reflect" 
"github.com/ethereum/go-ethereum/accounts/abi" ) +const tupleArrayType = "tuple[]" + // DecodeABIEncodedStructArray is a generic helper that decodes ABI-encoded tuple array // It handles the ABI unpacking and type conversion boilerplate -func DecodeABIEncodedStructArray[T any]( - encodedBytes []byte, - converter func(any) (T, error), -) ([]T, error) { +func DecodeABIEncodedStructArray[T any](encodedBytes []byte) ([]T, error) { if len(encodedBytes) == 0 { return nil, errors.New("encoded bytes are empty") } @@ -24,7 +22,7 @@ func DecodeABIEncodedStructArray[T any]( return nil, fmt.Errorf("failed to build ABI fields: %w", err) } - arrayType, err := abi.NewType("tuple[]", "", abiFields) + arrayType, err := abi.NewType(tupleArrayType, "", abiFields) if err != nil { return nil, fmt.Errorf("failed to create array type: %w", err) } @@ -40,22 +38,6 @@ func DecodeABIEncodedStructArray[T any]( return nil, errors.New("unpacked data is empty") } - // The unpacked[0] contains the slice, but we need to extract it via reflection - // since the ABI library returns anonymous structs - val := reflect.ValueOf(unpacked[0]) - if val.Kind() != reflect.Slice { - return nil, fmt.Errorf("expected slice, got %v", val.Kind()) - } - - result := make([]T, val.Len()) - for i := 0; i < val.Len(); i++ { - item := val.Index(i).Interface() - converted, err := converter(item) - if err != nil { - return nil, fmt.Errorf("failed to convert item %d: %w", i, err) - } - result[i] = converted - } - - return result, nil + decodedData := *abi.ConvertType(unpacked[0], new([]T)).(*[]T) + return decodedData, nil } diff --git a/abi/abi_decode_test.go b/abi/abi_decode_test.go index 3d9052ad3..d35318cd5 100644 --- a/abi/abi_decode_test.go +++ b/abi/abi_decode_test.go @@ -1,7 +1,6 @@ package abi import ( - "errors" "math/big" "testing" @@ -11,9 +10,9 @@ import ( func TestDecodeABIEncodedStructArray(t *testing.T) { type TestStruct struct { - Field1 uint8 `abiarg:"field1"` - Field2 uint32 `abiarg:"field2"` - Field3 
common.Address `abiarg:"field3"` + Field1 uint8 `abi:"field1"` + Field2 uint32 `abi:"field2"` + Field3 common.Address `abi:"field3"` } // Create test data @@ -35,66 +34,25 @@ func TestDecodeABIEncodedStructArray(t *testing.T) { require.NoError(t, err) require.NotEmpty(t, encodedBytes) - // Decode with converter - converter := func(item any) (TestStruct, error) { - // The ABI library returns anonymous structs, we need to extract fields - // In real usage, you'd use reflection or type assertions - return TestStruct{ - Field1: 1, // Placeholder for test - Field2: 100, - Field3: common.HexToAddress("0x1111111111111111111111111111111111111111"), - }, nil - } - - decoded, err := DecodeABIEncodedStructArray(encodedBytes, converter) + decoded, err := DecodeABIEncodedStructArray[TestStruct](encodedBytes) require.NoError(t, err) require.Len(t, decoded, 2) } func TestDecodeABIEncodedStructArray_EmptyBytes(t *testing.T) { type TestStruct struct { - Field1 uint8 `abiarg:"field1"` - } - - converter := func(item any) (TestStruct, error) { - return TestStruct{}, nil + Field1 uint8 `abi:"field1"` } - _, err := DecodeABIEncodedStructArray([]byte{}, converter) + _, err := DecodeABIEncodedStructArray[TestStruct]([]byte{}) require.Error(t, err) require.Contains(t, err.Error(), "encoded bytes are empty") } -func TestDecodeABIEncodedStructArray_ConverterError(t *testing.T) { - type TestStruct struct { - Field1 uint8 `abiarg:"field1"` - Field2 uint32 `abiarg:"field2"` - } - - // Create test data - items := []TestStruct{ - {Field1: 1, Field2: 100}, - {Field1: 2, Field2: 200}, - } - - encodedBytes, err := EncodeABIStructArray(items) - require.NoError(t, err) - - // Converter that always fails - converter := func(item any) (TestStruct, error) { - return TestStruct{}, errors.New("converter failed") - } - - _, err = DecodeABIEncodedStructArray(encodedBytes, converter) - require.Error(t, err) - require.Contains(t, err.Error(), "failed to convert item 0") - require.Contains(t, err.Error(), 
"converter failed") -} - func TestDecodeABIEncodedStructArray_WithBigInt(t *testing.T) { type TestStruct struct { - Amount *big.Int `abiarg:"amount,uint256"` - Value uint32 `abiarg:"value"` + Amount *big.Int `abi:"amount"` + Value uint32 `abi:"value"` } // Create test data @@ -107,20 +65,14 @@ func TestDecodeABIEncodedStructArray_WithBigInt(t *testing.T) { encodedBytes, err := EncodeABIStructArray(items) require.NoError(t, err) - // Converter that extracts fields - converter := func(item any) (TestStruct, error) { - // In real usage, you'd use reflection to extract the fields - return TestStruct{Amount: big.NewInt(1000), Value: 1}, nil - } - - decoded, err := DecodeABIEncodedStructArray(encodedBytes, converter) + decoded, err := DecodeABIEncodedStructArray[TestStruct](encodedBytes) require.NoError(t, err) require.Len(t, decoded, 3) } func TestDecodeABIEncodedStructArray_EmptyArray(t *testing.T) { type TestStruct struct { - Field1 uint8 `abiarg:"field1"` + Field1 uint8 `abi:"field1"` } // Encode empty array @@ -128,41 +80,33 @@ func TestDecodeABIEncodedStructArray_EmptyArray(t *testing.T) { encodedBytes, err := EncodeABIStructArray(items) require.NoError(t, err) - converter := func(item any) (TestStruct, error) { - return TestStruct{}, nil - } - - decoded, err := DecodeABIEncodedStructArray(encodedBytes, converter) + decoded, err := DecodeABIEncodedStructArray[TestStruct](encodedBytes) require.NoError(t, err) require.Len(t, decoded, 0) } func TestDecodeABIEncodedStructArray_InvalidABIData(t *testing.T) { type TestStruct struct { - Field1 uint8 `abiarg:"field1"` - } - - converter := func(item any) (TestStruct, error) { - return TestStruct{}, nil + Field1 uint8 `abi:"field1"` } // Invalid ABI encoded data invalidData := []byte{0x01, 0x02, 0x03} - _, err := DecodeABIEncodedStructArray(invalidData, converter) + _, err := DecodeABIEncodedStructArray[TestStruct](invalidData) require.Error(t, err) require.Contains(t, err.Error(), "failed to unpack data") } func 
TestDecodeABIEncodedStructArray_ComplexStruct(t *testing.T) { type ComplexStruct struct { - LeafType uint8 `abiarg:"leafType"` - OriginNetwork uint32 `abiarg:"originNetwork"` - OriginAddress common.Address `abiarg:"originAddress"` - DestinationNetwork uint32 `abiarg:"destinationNetwork"` - DestinationAddress common.Address `abiarg:"destinationAddress"` - Amount *big.Int `abiarg:"amount,uint256"` - Metadata []byte `abiarg:"metadata"` + LeafType uint8 `abi:"leafType"` + OriginNetwork uint32 `abi:"originNetwork"` + OriginAddress common.Address `abi:"originAddress"` + DestinationNetwork uint32 `abi:"destinationNetwork"` + DestinationAddress common.Address `abi:"destinationAddress"` + Amount *big.Int `abi:"amount"` + Metadata []byte `abi:"metadata"` } // Create test data @@ -190,16 +134,7 @@ func TestDecodeABIEncodedStructArray_ComplexStruct(t *testing.T) { encodedBytes, err := EncodeABIStructArray(items) require.NoError(t, err) - converter := func(item any) (ComplexStruct, error) { - // Placeholder converter for test - return ComplexStruct{ - LeafType: 1, - OriginNetwork: 1, - Amount: big.NewInt(1000), - }, nil - } - - decoded, err := DecodeABIEncodedStructArray(encodedBytes, converter) + decoded, err := DecodeABIEncodedStructArray[ComplexStruct](encodedBytes) require.NoError(t, err) require.Len(t, decoded, 2) } @@ -210,13 +145,9 @@ func TestDecodeABIEncodedStructArray_NoABITags(t *testing.T) { Field2 uint32 } - converter := func(item any) (BadStruct, error) { - return BadStruct{}, nil - } - - // Try to decode with a struct that has no abiarg tags + // Try to decode with a struct that has no abi tags // BuildABIFields will succeed but return empty fields, which will cause unpack to fail - _, err := DecodeABIEncodedStructArray([]byte{0x01}, converter) + _, err := DecodeABIEncodedStructArray[BadStruct]([]byte{0x01}) require.Error(t, err) // The error will be from unpacking due to insufficient data or empty ABI fields require.Contains(t, err.Error(), "failed to") @@ 
-224,7 +155,7 @@ func TestDecodeABIEncodedStructArray_NoABITags(t *testing.T) { func TestDecodeABIEncodedStructArray_SingleItem(t *testing.T) { type TestStruct struct { - Value uint64 `abiarg:"value"` + Value uint64 `abi:"value"` } // Create single item array @@ -235,42 +166,8 @@ func TestDecodeABIEncodedStructArray_SingleItem(t *testing.T) { encodedBytes, err := EncodeABIStructArray(items) require.NoError(t, err) - converter := func(item any) (TestStruct, error) { - return TestStruct{Value: 12345}, nil - } - - decoded, err := DecodeABIEncodedStructArray(encodedBytes, converter) + decoded, err := DecodeABIEncodedStructArray[TestStruct](encodedBytes) require.NoError(t, err) require.Len(t, decoded, 1) require.Equal(t, uint64(12345), decoded[0].Value) } - -func TestDecodeABIEncodedStructArray_ConverterPartialFailure(t *testing.T) { - type TestStruct struct { - Field1 uint8 `abiarg:"field1"` - Field2 uint32 `abiarg:"field2"` - } - - items := []TestStruct{ - {Field1: 1, Field2: 100}, - {Field1: 2, Field2: 200}, - {Field1: 3, Field2: 300}, - } - - encodedBytes, err := EncodeABIStructArray(items) - require.NoError(t, err) - - callCount := 0 - converter := func(item any) (TestStruct, error) { - callCount++ - if callCount == 2 { - return TestStruct{}, errors.New("failed on item 2") - } - return TestStruct{Field1: 1, Field2: 100}, nil - } - - _, err = DecodeABIEncodedStructArray(encodedBytes, converter) - require.Error(t, err) - require.Contains(t, err.Error(), "failed to convert item 1") - require.Contains(t, err.Error(), "failed on item 2") -} diff --git a/abi/abi_encode.go b/abi/abi_encode.go index 3d8d319df..5ec45f14d 100644 --- a/abi/abi_encode.go +++ b/abi/abi_encode.go @@ -22,7 +22,7 @@ func EncodeABIStructArray[T any](items []T) ([]byte, error) { return nil, fmt.Errorf("failed to build ABI fields: %w", err) } - arrayType, err := abi.NewType("tuple[]", "", abiFields) + arrayType, err := abi.NewType(tupleArrayType, "", abiFields) if err != nil { return nil, 
fmt.Errorf("failed to create array type: %w", err) } diff --git a/abi/abi_encode_test.go b/abi/abi_encode_test.go index b3ea3537a..b6e7ef2fd 100644 --- a/abi/abi_encode_test.go +++ b/abi/abi_encode_test.go @@ -10,9 +10,9 @@ import ( func TestEncodeABIStructArray(t *testing.T) { type TestStruct struct { - Field1 uint8 `abiarg:"field1"` - Field2 uint32 `abiarg:"field2"` - Field3 common.Address `abiarg:"field3"` + Field1 uint8 `abi:"field1"` + Field2 uint32 `abi:"field2"` + Field3 common.Address `abi:"field3"` } items := []TestStruct{ @@ -32,20 +32,14 @@ func TestEncodeABIStructArray(t *testing.T) { require.NoError(t, err) require.NotEmpty(t, encodedBytes) - // Decode to verify roundtrip - converter := func(item any) (TestStruct, error) { - // Simple converter for test verification - return TestStruct{}, nil - } - - _, err = DecodeABIEncodedStructArray(encodedBytes, converter) + _, err = DecodeABIEncodedStructArray[TestStruct](encodedBytes) require.NoError(t, err) } func TestEncodeABIStructArray_EmptySlice(t *testing.T) { type TestStruct struct { - Field1 uint8 `abiarg:"field1"` - Field2 uint32 `abiarg:"field2"` + Field1 uint8 `abi:"field1"` + Field2 uint32 `abi:"field2"` } items := []TestStruct{} @@ -57,7 +51,7 @@ func TestEncodeABIStructArray_EmptySlice(t *testing.T) { func TestEncodeABIStructArray_WithBigInt(t *testing.T) { type TestStruct struct { - Amount *big.Int `abiarg:"amount,uint256"` + Amount *big.Int `abi:"amount"` } items := []TestStruct{ diff --git a/bridgesync/abi.go b/bridgesync/abi.go deleted file mode 100644 index a349bf7d7..000000000 --- a/bridgesync/abi.go +++ /dev/null @@ -1,132 +0,0 @@ -package bridgesync - -import ( - "errors" - "fmt" - "math/big" - "reflect" - - aggkitabi "github.com/agglayer/aggkit/abi" - "github.com/ethereum/go-ethereum/common" -) - -type LeafData struct { - LeafType uint8 `abiarg:"leafType"` - OriginNetwork uint32 `abiarg:"originNetwork"` - OriginAddress common.Address `abiarg:"originAddress"` - DestinationNetwork uint32 
`abiarg:"destinationNetwork"` - DestinationAddress common.Address `abiarg:"destinationAddress"` - Amount *big.Int `abiarg:"amount,uint256"` - Metadata []byte `abiarg:"metadata"` -} - -func (l LeafData) String() string { - return fmt.Sprintf("LeafData{LeafType: %d, OriginNetwork: %d, OriginAddress: %s, "+ - "DestinationNetwork: %d, DestinationAddress: %s, Amount: %s, Metadata: %x}", - l.LeafType, - l.OriginNetwork, - l.OriginAddress.Hex(), - l.DestinationNetwork, - l.DestinationAddress.Hex(), - l.Amount.String(), - l.Metadata, - ) -} - -func (l LeafData) ToBridge( - blockNum, blockPos, blockTimestamp uint64, - depositCount uint32, - txnHash common.Hash, - txnSender, fromAddr common.Address) Bridge { - return Bridge{ - BlockNum: blockNum, - BlockPos: blockPos, - BlockTimestamp: blockTimestamp, - DepositCount: depositCount, - TxHash: txnHash, - FromAddress: fromAddr, - TxnSender: txnSender, - LeafType: l.LeafType, - OriginNetwork: l.OriginNetwork, - OriginAddress: l.OriginAddress, - DestinationNetwork: l.DestinationNetwork, - DestinationAddress: l.DestinationAddress, - Amount: l.Amount, - Metadata: l.Metadata, - Source: BridgeSourceForwardLET, // this leaf comes from ForwardLET event - } -} - -// decodeForwardLETLeaves decodes the newLeaves bytes from a ForwardLET event -func decodeForwardLETLeaves(newLeavesBytes []byte) ([]LeafData, error) { - return aggkitabi.DecodeABIEncodedStructArray(newLeavesBytes, convertABILeafData) -} - -// convertABILeafData converts an anonymous struct returned by the ABI decoder -func convertABILeafData(item any) (LeafData, error) { - // Use reflection to extract fields from the anonymous struct created by ABI library - // The ABI library generates structs with JSON tags that don't match our named types - val := reflect.ValueOf(item) - if val.Kind() != reflect.Struct { - return LeafData{}, fmt.Errorf("expected struct, got %T", item) - } - - expectedFields := reflect.TypeOf(LeafData{}).NumField() - if val.NumField() != expectedFields { - 
return LeafData{}, fmt.Errorf("expected %d fields, got %d", expectedFields, val.NumField()) - } - - // Create a map of field names to values from the ABI struct - fieldMap := make(map[string]any) - valType := val.Type() - for i := 0; i < val.NumField(); i++ { - fieldName := valType.Field(i).Name - fieldMap[fieldName] = val.Field(i).Interface() - } - - // Extract fields by name with type assertions - leafType, ok := fieldMap["LeafType"].(uint8) - if !ok { - return LeafData{}, errors.New("failed to convert field 'leafType' to uint8") - } - - originNetwork, ok := fieldMap["OriginNetwork"].(uint32) - if !ok { - return LeafData{}, errors.New("failed to convert field 'originNetwork' to uint32") - } - - originAddress, ok := fieldMap["OriginAddress"].(common.Address) - if !ok { - return LeafData{}, errors.New("failed to convert field 'originAddress' to common.Address") - } - - destinationNetwork, ok := fieldMap["DestinationNetwork"].(uint32) - if !ok { - return LeafData{}, errors.New("failed to convert field 'destinationNetwork' to uint32") - } - - destinationAddress, ok := fieldMap["DestinationAddress"].(common.Address) - if !ok { - return LeafData{}, errors.New("failed to convert field 'destinationAddress' to common.Address") - } - - amount, ok := fieldMap["Amount"].(*big.Int) - if !ok { - return LeafData{}, errors.New("failed to convert field 'amount' to *big.Int") - } - - metadata, ok := fieldMap["Metadata"].([]byte) - if !ok { - return LeafData{}, errors.New("failed to convert field 'metadata' to []byte") - } - - return LeafData{ - LeafType: leafType, - OriginNetwork: originNetwork, - OriginAddress: originAddress, - DestinationNetwork: destinationNetwork, - DestinationAddress: destinationAddress, - Amount: amount, - Metadata: metadata, - }, nil -} diff --git a/bridgesync/leaf_data.go b/bridgesync/leaf_data.go new file mode 100644 index 000000000..254013149 --- /dev/null +++ b/bridgesync/leaf_data.go @@ -0,0 +1,65 @@ +package bridgesync + +import ( + "fmt" + "math/big" 
+ + aggkitabi "github.com/agglayer/aggkit/abi" + "github.com/ethereum/go-ethereum/common" +) + +// LeafData represents the data structure of a leaf in the local exit tree +// used in ForwardLET events. +type LeafData struct { + LeafType uint8 `abi:"leafType"` + OriginNetwork uint32 `abi:"originNetwork"` + OriginAddress common.Address `abi:"originAddress"` + DestinationNetwork uint32 `abi:"destinationNetwork"` + DestinationAddress common.Address `abi:"destinationAddress"` + Amount *big.Int `abi:"amount"` + Metadata []byte `abi:"metadata"` +} + +// String returns a string representation of the LeafData +func (l LeafData) String() string { + return fmt.Sprintf("LeafData{LeafType: %d, OriginNetwork: %d, OriginAddress: %s, "+ + "DestinationNetwork: %d, DestinationAddress: %s, Amount: %s, Metadata: %x}", + l.LeafType, + l.OriginNetwork, + l.OriginAddress.Hex(), + l.DestinationNetwork, + l.DestinationAddress.Hex(), + l.Amount.String(), + l.Metadata, + ) +} + +// ToBridge converts the LeafData to a Bridge structure +func (l LeafData) ToBridge( + blockNum, blockPos, blockTimestamp uint64, + depositCount uint32, + txnHash common.Hash, + txnSender, fromAddr common.Address) Bridge { + return Bridge{ + BlockNum: blockNum, + BlockPos: blockPos, + BlockTimestamp: blockTimestamp, + DepositCount: depositCount, + TxHash: txnHash, + FromAddress: fromAddr, + TxnSender: txnSender, + LeafType: l.LeafType, + OriginNetwork: l.OriginNetwork, + OriginAddress: l.OriginAddress, + DestinationNetwork: l.DestinationNetwork, + DestinationAddress: l.DestinationAddress, + Amount: l.Amount, + Metadata: l.Metadata, + Source: BridgeSourceForwardLET, // this leaf comes from ForwardLET event + } +} + +// decodeForwardLETLeaves decodes the newLeaves bytes from a ForwardLET event +func decodeForwardLETLeaves(newLeavesBytes []byte) ([]LeafData, error) { + return aggkitabi.DecodeABIEncodedStructArray[LeafData](newLeavesBytes) +} diff --git a/bridgesync/abi_test.go b/bridgesync/leaf_data_test.go similarity 
index 100% rename from bridgesync/abi_test.go rename to bridgesync/leaf_data_test.go diff --git a/bridgesync/processor_forward_let_test.go b/bridgesync/processor_forward_let_test.go deleted file mode 100644 index b5c92cfc2..000000000 --- a/bridgesync/processor_forward_let_test.go +++ /dev/null @@ -1,734 +0,0 @@ -package bridgesync - -import ( - "fmt" - "math/big" - "path/filepath" - "testing" - - aggkitabi "github.com/agglayer/aggkit/abi" - "github.com/agglayer/aggkit/bridgesync/migrations" - aggkitcommon "github.com/agglayer/aggkit/common" - "github.com/agglayer/aggkit/db" - dbtypes "github.com/agglayer/aggkit/db/types" - "github.com/agglayer/aggkit/log" - "github.com/agglayer/aggkit/tree/types" - "github.com/ethereum/go-ethereum/common" - "github.com/russross/meddler" - "github.com/stretchr/testify/require" -) - -func TestHandleForwardLETEvent(t *testing.T) { - t.Run("successfully process single leaf with no archived bridge", func(t *testing.T) { - p, tx := setupProcessorWithTransaction(t) - defer tx.Rollback() //nolint:errcheck - - // Setup: Insert initial leaves to establish previous root (indices 0-4) - var initialRoot common.Hash - var err error - // Insert block rows for initial leaves - for i := uint32(0); i <= 4; i++ { - _, err = tx.Exec(`INSERT INTO block (num) VALUES ($1)`, 10+uint64(i)) - require.NoError(t, err) - } - for i := uint32(0); i <= 4; i++ { - leaf := types.Leaf{Index: i, Hash: common.HexToHash(fmt.Sprintf("0x%d", i))} - initialRoot, err = p.exitTree.PutLeaf(tx, 10+uint64(i), 0, leaf) - require.NoError(t, err) - } - initialDepositCount := uint32(4) // Last index inserted - - // Insert block for the ForwardLET event - _, err = tx.Exec(`INSERT INTO block (num) VALUES ($1)`, uint64(100)) - require.NoError(t, err) - - // Create forward LET event with one leaf - leaves := []LeafData{ - { - LeafType: 1, - OriginNetwork: 5, - OriginAddress: common.HexToAddress("0x1234567890123456789012345678901234567890"), - DestinationNetwork: 10, - 
DestinationAddress: common.HexToAddress("0xabcdefabcdefabcdefabcdefabcdefabcdefabcd"), - Amount: big.NewInt(1000000), - Metadata: []byte("test metadata"), - }, - } - encodedLeaves := encodeLeafDataArrayForTest(t, leaves) - - event := &ForwardLET{ - BlockNum: 100, - BlockPos: 5, - BlockTimestamp: 1234567890, - TxnHash: common.HexToHash("0xabc123"), - PreviousDepositCount: big.NewInt(int64(initialDepositCount)), - PreviousRoot: initialRoot, - NewDepositCount: big.NewInt(int64(initialDepositCount + 1)), - NewLeaves: encodedLeaves, - } - - // Calculate the expected root that will result from processing these leaves - event.NewRoot = calculateExpectedRootAfterForwardLET(t, initialDepositCount, leaves, event) - - // Test: Process the forward LET event - blockPos := event.BlockPos - newBlockPos, err := p.handleForwardLETEvent(tx, event, &blockPos) - require.NoError(t, err) - require.Equal(t, event.BlockPos+1, newBlockPos) - - // Verify: Bridge was inserted - var bridges []*Bridge - err = meddler.QueryAll(tx, &bridges, "SELECT * FROM bridge WHERE block_num = $1", event.BlockNum) - require.NoError(t, err) - require.Len(t, bridges, 1) - - bridge := bridges[0] - require.Equal(t, event.BlockNum, bridge.BlockNum) - require.Equal(t, event.BlockPos, bridge.BlockPos) - require.Equal(t, leaves[0].LeafType, bridge.LeafType) - require.Equal(t, leaves[0].OriginNetwork, bridge.OriginNetwork) - require.Equal(t, leaves[0].OriginAddress, bridge.OriginAddress) - require.Equal(t, leaves[0].DestinationNetwork, bridge.DestinationNetwork) - require.Equal(t, leaves[0].DestinationAddress, bridge.DestinationAddress) - require.Equal(t, 0, leaves[0].Amount.Cmp(bridge.Amount)) - require.Equal(t, leaves[0].Metadata, bridge.Metadata) - require.Equal(t, initialDepositCount+1, bridge.DepositCount) - require.Equal(t, event.TxnHash, bridge.TxHash) - require.Equal(t, aggkitcommon.ZeroAddress, bridge.TxnSender) - require.Equal(t, aggkitcommon.ZeroAddress, bridge.FromAddress) - require.Equal(t, 
BridgeSourceForwardLET, bridge.Source) - - // Verify: ForwardLET event was inserted - var forwardLETs []*ForwardLET - err = meddler.QueryAll(tx, &forwardLETs, "SELECT * FROM forward_let WHERE block_num = $1", event.BlockNum) - require.NoError(t, err) - require.Len(t, forwardLETs, 1) - require.Equal(t, event.BlockNum, forwardLETs[0].BlockNum) - }) - - t.Run("successfully process multiple leaves", func(t *testing.T) { - p, tx := setupProcessorWithTransaction(t) - defer tx.Rollback() //nolint:errcheck - - // Setup: Insert initial leaves (indices 0-9) - var initialRoot common.Hash - var err error - // Insert block rows for initial leaves - for i := uint32(0); i <= 9; i++ { - _, err = tx.Exec(`INSERT INTO block (num) VALUES ($1)`, 20+uint64(i)) - require.NoError(t, err) - } - for i := uint32(0); i <= 9; i++ { - leaf := types.Leaf{Index: i, Hash: common.HexToHash(fmt.Sprintf("0x%d", i))} - initialRoot, err = p.exitTree.PutLeaf(tx, 20+uint64(i), 0, leaf) - require.NoError(t, err) - } - initialDepositCount := uint32(9) // Last index inserted - - // Insert block for the ForwardLET event - _, err = tx.Exec(`INSERT INTO block (num) VALUES ($1)`, uint64(200)) - require.NoError(t, err) - - // Create forward LET event with three leaves - leaves := []LeafData{ - { - LeafType: 0, - OriginNetwork: 1, - OriginAddress: common.HexToAddress("0x1111111111111111111111111111111111111111"), - DestinationNetwork: 2, - DestinationAddress: common.HexToAddress("0x2222222222222222222222222222222222222222"), - Amount: big.NewInt(100), - Metadata: []byte("first"), - }, - { - LeafType: 1, - OriginNetwork: 3, - OriginAddress: common.HexToAddress("0x3333333333333333333333333333333333333333"), - DestinationNetwork: 4, - DestinationAddress: common.HexToAddress("0x4444444444444444444444444444444444444444"), - Amount: big.NewInt(200), - Metadata: []byte("second"), - }, - { - LeafType: 2, - OriginNetwork: 5, - OriginAddress: common.HexToAddress("0x5555555555555555555555555555555555555555"), - 
DestinationNetwork: 6, - DestinationAddress: common.HexToAddress("0x6666666666666666666666666666666666666666"), - Amount: big.NewInt(300), - Metadata: []byte("third"), - }, - } - encodedLeaves := encodeLeafDataArrayForTest(t, leaves) - - event := &ForwardLET{ - BlockNum: 200, - BlockPos: 10, - BlockTimestamp: 1234567900, - TxnHash: common.HexToHash("0xdef456"), - PreviousDepositCount: big.NewInt(int64(initialDepositCount)), - PreviousRoot: initialRoot, - NewDepositCount: big.NewInt(int64(initialDepositCount + uint32(len(leaves)))), - NewLeaves: encodedLeaves, - } - - // Calculate the expected root that will result from processing these leaves - event.NewRoot = calculateExpectedRootAfterForwardLET(t, initialDepositCount, leaves, event) - - // Test: Process the forward LET event - blockPos := event.BlockPos - newBlockPos, err := p.handleForwardLETEvent(tx, event, &blockPos) - require.NoError(t, err) - require.Equal(t, event.BlockPos+uint64(len(leaves)), newBlockPos) - - // Verify: All bridges were inserted - var bridges []*Bridge - err = meddler.QueryAll(tx, &bridges, "SELECT * FROM bridge WHERE block_num = $1 ORDER BY block_pos", event.BlockNum) - require.NoError(t, err) - require.Len(t, bridges, 3) - - // Verify each bridge - for i, bridge := range bridges { - require.Equal(t, event.BlockNum, bridge.BlockNum) - require.Equal(t, event.BlockPos+uint64(i), bridge.BlockPos) - require.Equal(t, leaves[i].LeafType, bridge.LeafType) - require.Equal(t, leaves[i].OriginNetwork, bridge.OriginNetwork) - require.Equal(t, initialDepositCount+uint32(i)+1, bridge.DepositCount) - require.Equal(t, BridgeSourceForwardLET, bridge.Source) - } - }) - - t.Run("process leaf with matching archived bridge", func(t *testing.T) { - p, tx := setupProcessorWithTransaction(t) - defer tx.Rollback() //nolint:errcheck - - // Setup: Insert initial leaves (indices 0-14) - var initialRoot common.Hash - var err error - // Insert block rows for initial leaves - for i := uint32(0); i <= 14; i++ { - _, 
err = tx.Exec(`INSERT INTO block (num) VALUES ($1)`, 30+uint64(i)) - require.NoError(t, err) - } - for i := uint32(0); i <= 14; i++ { - leaf := types.Leaf{Index: i, Hash: common.HexToHash(fmt.Sprintf("0x%d", i))} - initialRoot, err = p.exitTree.PutLeaf(tx, 30+uint64(i), 0, leaf) - require.NoError(t, err) - } - initialDepositCount := uint32(14) // Last index inserted - - // Insert blocks for the archived bridge and ForwardLET event - _, err = tx.Exec(`INSERT INTO block (num) VALUES ($1), ($2)`, uint64(50), uint64(300)) - require.NoError(t, err) - - // Setup: Create and archive a bridge that will match the forward LET leaf - archivedTxHash := common.HexToHash("0xoriginal123") - archivedTxnSender := common.HexToAddress("0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa") - archivedFromAddr := common.HexToAddress("0xbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb") - - archivedBridge := &Bridge{ - BlockNum: 50, - BlockPos: 0, - LeafType: 1, - OriginNetwork: 7, - OriginAddress: common.HexToAddress("0x7777777777777777777777777777777777777777"), - DestinationNetwork: 8, - DestinationAddress: common.HexToAddress("0x8888888888888888888888888888888888888888"), - Amount: big.NewInt(500000), - Metadata: []byte("archived metadata"), - DepositCount: 20, - TxHash: archivedTxHash, - TxnSender: archivedTxnSender, - FromAddress: archivedFromAddr, - // Don't set Source - bridge_archive table doesn't have this column - } - // Insert manually to avoid Source field - err = meddler.Insert(tx, "bridge_archive", archivedBridge) - require.NoError(t, err) - - // Create forward LET event with matching leaf - leaves := []LeafData{ - { - LeafType: archivedBridge.LeafType, - OriginNetwork: archivedBridge.OriginNetwork, - OriginAddress: archivedBridge.OriginAddress, - DestinationNetwork: archivedBridge.DestinationNetwork, - DestinationAddress: archivedBridge.DestinationAddress, - Amount: archivedBridge.Amount, - Metadata: archivedBridge.Metadata, - }, - } - encodedLeaves := encodeLeafDataArrayForTest(t, 
leaves) - - event := &ForwardLET{ - BlockNum: 300, - BlockPos: 20, - BlockTimestamp: 1234567950, - TxnHash: common.HexToHash("0xforward789"), - PreviousDepositCount: big.NewInt(int64(initialDepositCount)), - PreviousRoot: initialRoot, - NewDepositCount: big.NewInt(int64(initialDepositCount + 1)), - NewLeaves: encodedLeaves, - } - - // Calculate expected new root using helper (which will query for archived bridge) - event.NewRoot = calculateExpectedRootAfterForwardLET(t, initialDepositCount, leaves, event, archivedBridge) - - // Test: Process the forward LET event - blockPos := event.BlockPos - newBlockPos, err := p.handleForwardLETEvent(tx, event, &blockPos) - require.NoError(t, err) - require.Equal(t, event.BlockPos+1, newBlockPos) - - // Verify: Bridge was inserted with archived tx info - var bridges []*Bridge - err = meddler.QueryAll(tx, &bridges, "SELECT * FROM bridge WHERE block_num = $1", event.BlockNum) - require.NoError(t, err) - require.Len(t, bridges, 1) - - bridge := bridges[0] - require.Equal(t, archivedTxHash, bridge.TxHash, "Should use archived tx hash") - require.Equal(t, archivedTxnSender, bridge.TxnSender, "Should use archived txn sender") - require.Equal(t, archivedFromAddr, bridge.FromAddress, "Should use archived from address") - require.Equal(t, BridgeSourceForwardLET, bridge.Source) - }) - - t.Run("process leaf with multiple matching archived bridges", func(t *testing.T) { - p, tx := setupProcessorWithTransaction(t) - defer tx.Rollback() //nolint:errcheck - - // Setup: Insert initial leaves (indices 0-24) - var initialRoot common.Hash - var err error - // Insert block rows for initial leaves - for i := uint32(0); i <= 24; i++ { - _, err = tx.Exec(`INSERT INTO block (num) VALUES ($1)`, 40+uint64(i)) - require.NoError(t, err) - } - for i := uint32(0); i <= 24; i++ { - leaf := types.Leaf{Index: i, Hash: common.HexToHash(fmt.Sprintf("0x%d", i))} - initialRoot, err = p.exitTree.PutLeaf(tx, 40+uint64(i), 0, leaf) - require.NoError(t, err) - } - 
initialDepositCount := uint32(24) // Last index inserted - - // Insert blocks for archived bridges (60, 61 already exist from initial leaves) and ForwardLET event - _, err = tx.Exec(`INSERT INTO block (num) VALUES ($1)`, uint64(400)) - require.NoError(t, err) - - // Setup: Create two archived bridges with identical LeafData fields - commonLeafData := LeafData{ - LeafType: 1, - OriginNetwork: 9, - OriginAddress: common.HexToAddress("0x9999999999999999999999999999999999999999"), - DestinationNetwork: 11, - DestinationAddress: common.HexToAddress("0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"), - Amount: big.NewInt(750000), - Metadata: []byte("duplicate metadata"), - } - - archivedBridge1 := &Bridge{ - BlockNum: 60, - BlockPos: 0, - LeafType: commonLeafData.LeafType, - OriginNetwork: commonLeafData.OriginNetwork, - OriginAddress: commonLeafData.OriginAddress, - DestinationNetwork: commonLeafData.DestinationNetwork, - DestinationAddress: commonLeafData.DestinationAddress, - Amount: commonLeafData.Amount, - Metadata: commonLeafData.Metadata, - DepositCount: 30, - TxHash: common.HexToHash("0xfirst111"), - TxnSender: common.HexToAddress("0x1111111111111111111111111111111111111111"), - FromAddress: common.HexToAddress("0x2222222222222222222222222222222222222222"), - } - - archivedBridge2 := &Bridge{ - BlockNum: 61, - BlockPos: 0, - LeafType: commonLeafData.LeafType, - OriginNetwork: commonLeafData.OriginNetwork, - OriginAddress: commonLeafData.OriginAddress, - DestinationNetwork: commonLeafData.DestinationNetwork, - DestinationAddress: commonLeafData.DestinationAddress, - Amount: commonLeafData.Amount, - Metadata: commonLeafData.Metadata, - DepositCount: 31, - TxHash: common.HexToHash("0xsecond222"), - TxnSender: common.HexToAddress("0x3333333333333333333333333333333333333333"), - FromAddress: common.HexToAddress("0x4444444444444444444444444444444444444444"), - } - - // Insert both archived bridges manually (to avoid Source column) - for _, archived := range 
[]*Bridge{archivedBridge1, archivedBridge2} { - err = meddler.Insert(tx, "bridge_archive", archived) - require.NoError(t, err) - } - - // Create forward LET event with the common leaf - leaves := []LeafData{commonLeafData} - encodedLeaves := encodeLeafDataArrayForTest(t, leaves) - - event := &ForwardLET{ - BlockNum: 400, - BlockPos: 30, - BlockTimestamp: 1234567999, - TxnHash: common.HexToHash("0xforward999"), - PreviousDepositCount: big.NewInt(int64(initialDepositCount)), - PreviousRoot: initialRoot, - NewDepositCount: big.NewInt(int64(initialDepositCount + 1)), - NewLeaves: encodedLeaves, - } - - // Calculate expected new root using helper (with no archived bridge info since multiple matches) - event.NewRoot = calculateExpectedRootAfterForwardLET(t, initialDepositCount, leaves, event) - - // Test: Process the forward LET event - blockPos := event.BlockPos - newBlockPos, err := p.handleForwardLETEvent(tx, event, &blockPos) - require.NoError(t, err) - require.Equal(t, event.BlockPos+1, newBlockPos) - - // Verify: Bridge was inserted with event's tx hash and empty addresses - var bridges []*Bridge - err = meddler.QueryAll(tx, &bridges, "SELECT * FROM bridge WHERE block_num = $1", event.BlockNum) - require.NoError(t, err) - require.Len(t, bridges, 1) - - bridge := bridges[0] - require.Equal(t, event.TxnHash, bridge.TxHash, "Should use event's tx hash when multiple archived bridges match") - require.Equal(t, common.Address{}, bridge.TxnSender, "TxnSender should be empty with multiple matches") - require.Equal(t, common.Address{}, bridge.FromAddress, "FromAddress should be empty with multiple matches") - require.Equal(t, BridgeSourceForwardLET, bridge.Source) - }) - - t.Run("error on previous root mismatch", func(t *testing.T) { - p, tx := setupProcessorWithTransaction(t) - defer tx.Rollback() //nolint:errcheck - - // Setup: Insert initial leaves (indices 0-4) - var initialRoot common.Hash - var err error - for i := uint32(0); i <= 4; i++ { - leaf := types.Leaf{Index: 
i, Hash: common.HexToHash(fmt.Sprintf("0x%d", i))} - initialRoot, err = p.exitTree.PutLeaf(tx, 10+uint64(i), 0, leaf) - require.NoError(t, err) - } - initialDepositCount := uint32(4) // Last index inserted - - // Create forward LET event with WRONG previous root - leaves := []LeafData{ - { - LeafType: 1, - OriginNetwork: 5, - OriginAddress: common.HexToAddress("0x1234567890123456789012345678901234567890"), - DestinationNetwork: 10, - DestinationAddress: common.HexToAddress("0xabcdefabcdefabcdefabcdefabcdefabcdefabcd"), - Amount: big.NewInt(1000000), - Metadata: []byte("test"), - }, - } - encodedLeaves := encodeLeafDataArrayForTest(t, leaves) - - event := &ForwardLET{ - BlockNum: 100, - BlockPos: 5, - BlockTimestamp: 1234567890, - TxnHash: common.HexToHash("0xabc123"), - PreviousDepositCount: big.NewInt(int64(initialDepositCount)), - PreviousRoot: common.HexToHash("0xWRONG"), // Wrong root - NewDepositCount: big.NewInt(int64(initialDepositCount + 1)), - NewRoot: common.HexToHash("0x999"), - NewLeaves: encodedLeaves, - } - - // Test: Should fail with root mismatch - blockPos := event.BlockPos - _, err = p.handleForwardLETEvent(tx, event, &blockPos) - require.Error(t, err) - require.Contains(t, err.Error(), "local exit root mismatch") - require.Contains(t, err.Error(), initialRoot.String()) - }) - - t.Run("error on new root mismatch", func(t *testing.T) { - p, tx := setupProcessorWithTransaction(t) - defer tx.Rollback() //nolint:errcheck - - // Setup: Insert initial leaves (indices 0-4) - var initialRoot common.Hash - var err error - // Insert block rows for initial leaves - for i := uint32(0); i <= 4; i++ { - _, err = tx.Exec(`INSERT INTO block (num) VALUES ($1)`, 10+uint64(i)) - require.NoError(t, err) - } - for i := uint32(0); i <= 4; i++ { - leaf := types.Leaf{Index: i, Hash: common.HexToHash(fmt.Sprintf("0x%d", i))} - initialRoot, err = p.exitTree.PutLeaf(tx, 10+uint64(i), 0, leaf) - require.NoError(t, err) - } - initialDepositCount := uint32(4) // Last index 
inserted - - // Insert block for the ForwardLET event - _, err = tx.Exec(`INSERT INTO block (num) VALUES ($1)`, uint64(100)) - require.NoError(t, err) - - // Create forward LET event - leaves := []LeafData{ - { - LeafType: 1, - OriginNetwork: 5, - OriginAddress: common.HexToAddress("0x1234567890123456789012345678901234567890"), - DestinationNetwork: 10, - DestinationAddress: common.HexToAddress("0xabcdefabcdefabcdefabcdefabcdefabcdefabcd"), - Amount: big.NewInt(1000000), - Metadata: []byte("test"), - }, - } - encodedLeaves := encodeLeafDataArrayForTest(t, leaves) - - event := &ForwardLET{ - BlockNum: 100, - BlockPos: 5, - BlockTimestamp: 1234567890, - TxnHash: common.HexToHash("0xabc123"), - PreviousDepositCount: big.NewInt(int64(initialDepositCount)), - PreviousRoot: initialRoot, - NewDepositCount: big.NewInt(int64(initialDepositCount + 1)), - NewRoot: common.HexToHash("0xWRONG"), // Wrong new root - NewLeaves: encodedLeaves, - } - - // Test: Should fail with new root mismatch after processing - blockPos := event.BlockPos - _, err = p.handleForwardLETEvent(tx, event, &blockPos) - require.Error(t, err) - require.Contains(t, err.Error(), "local exit root mismatch") - }) - - t.Run("error on invalid encoded leaves", func(t *testing.T) { - p, tx := setupProcessorWithTransaction(t) - defer tx.Rollback() //nolint:errcheck - - // Setup: Insert initial leaves (indices 0-4) - var initialRoot common.Hash - var err error - for i := uint32(0); i <= 4; i++ { - leaf := types.Leaf{Index: i, Hash: common.HexToHash(fmt.Sprintf("0x%d", i))} - initialRoot, err = p.exitTree.PutLeaf(tx, 10+uint64(i), 0, leaf) - require.NoError(t, err) - } - initialDepositCount := uint32(4) // Last index inserted - - event := &ForwardLET{ - BlockNum: 100, - BlockPos: 5, - BlockTimestamp: 1234567890, - TxnHash: common.HexToHash("0xabc123"), - PreviousDepositCount: big.NewInt(int64(initialDepositCount)), - PreviousRoot: initialRoot, - NewDepositCount: big.NewInt(int64(initialDepositCount + 1)), - NewRoot: 
common.Hash{}, - NewLeaves: []byte("invalid data"), // Invalid encoding - } - - // Test: Should fail to decode leaves - blockPos := event.BlockPos - _, err = p.handleForwardLETEvent(tx, event, &blockPos) - require.Error(t, err) - require.Contains(t, err.Error(), "failed to decode new leaves") - }) - - t.Run("process with nil blockPos parameter", func(t *testing.T) { - p, tx := setupProcessorWithTransaction(t) - defer tx.Rollback() //nolint:errcheck - - // Setup: Insert initial leaves (indices 0-4) - var initialRoot common.Hash - var err error - // Insert block rows for initial leaves - for i := uint32(0); i <= 4; i++ { - _, err = tx.Exec(`INSERT INTO block (num) VALUES ($1)`, 10+uint64(i)) - require.NoError(t, err) - } - for i := uint32(0); i <= 4; i++ { - leaf := types.Leaf{Index: i, Hash: common.HexToHash(fmt.Sprintf("0x%d", i))} - initialRoot, err = p.exitTree.PutLeaf(tx, 10+uint64(i), 0, leaf) - require.NoError(t, err) - } - initialDepositCount := uint32(4) // Last index inserted - - // Insert block for the ForwardLET event - _, err = tx.Exec(`INSERT INTO block (num) VALUES ($1)`, uint64(100)) - require.NoError(t, err) - - leaves := []LeafData{ - { - LeafType: 1, - OriginNetwork: 5, - OriginAddress: common.HexToAddress("0x1234567890123456789012345678901234567890"), - DestinationNetwork: 10, - DestinationAddress: common.HexToAddress("0xabcdefabcdefabcdefabcdefabcdefabcdefabcd"), - Amount: big.NewInt(1000000), - Metadata: []byte("test"), - }, - } - encodedLeaves := encodeLeafDataArrayForTest(t, leaves) - - event := &ForwardLET{ - BlockNum: 100, - BlockPos: 5, - BlockTimestamp: 1234567890, - TxnHash: common.HexToHash("0xabc123"), - PreviousDepositCount: big.NewInt(int64(initialDepositCount)), - PreviousRoot: initialRoot, - NewDepositCount: big.NewInt(int64(initialDepositCount + 1)), - NewLeaves: encodedLeaves, - } - - // Calculate expected root using helper - event.NewRoot = calculateExpectedRootAfterForwardLET(t, initialDepositCount, leaves, event) - - // Test: 
Process with nil blockPos (should use event.BlockPos) - newBlockPos, err := p.handleForwardLETEvent(tx, event, nil) - require.NoError(t, err) - require.Equal(t, event.BlockPos+1, newBlockPos) - - // Verify: Bridge uses event.BlockPos - var bridges []*Bridge - err = meddler.QueryAll(tx, &bridges, "SELECT * FROM bridge WHERE block_num = $1", event.BlockNum) - require.NoError(t, err) - require.Len(t, bridges, 1) - require.Equal(t, event.BlockPos, bridges[0].BlockPos) - }) -} - -// setupProcessorWithTransaction creates a processor and begins a transaction for testing -func setupProcessorWithTransaction(t *testing.T) (*processor, dbtypes.Txer) { - t.Helper() - - dbPath := filepath.Join(t.TempDir(), "test_forward_let.db") - err := migrations.RunMigrations(dbPath) - require.NoError(t, err) - - logger := log.WithFields("module", "test") - p, err := newProcessor(dbPath, "test", logger, dbQueryTimeout) - require.NoError(t, err) - - tx, err := db.NewTx(t.Context(), p.db) - require.NoError(t, err) - - return p, tx -} - -// calculateExpectedRootAfterForwardLET calculates what the tree root will be after processing ForwardLET leaves -// It does this using a completely separate processor to avoid affecting the test state -// archivedBridges: optional map from leaf index (in leaves slice) to archived bridge info -func calculateExpectedRootAfterForwardLET(t *testing.T, initialDepositCount uint32, - leaves []LeafData, event *ForwardLET, archivedBridges ...*Bridge) common.Hash { - t.Helper() - - // Build a map for quick lookup of archived bridge info by leaf data - archivedByLeaf := make(map[int]*Bridge) - for i, archived := range archivedBridges { - if archived != nil { - archivedByLeaf[i] = archived - } - } - - // Create a temporary processor with its own database - tempDBPath := filepath.Join(t.TempDir(), "temp_calc.db") - err := migrations.RunMigrations(tempDBPath) - require.NoError(t, err) - - logger := log.WithFields("module", "test-calc") - tempP, err := 
newProcessor(tempDBPath, "test-calc", logger, dbQueryTimeout) - require.NoError(t, err) - - tempTx, err := db.NewTx(t.Context(), tempP.db) - require.NoError(t, err) - defer tempTx.Rollback() //nolint:errcheck - - // Insert block rows for the setup leaves - for i := uint32(0); i <= initialDepositCount; i++ { - _, err = tempTx.Exec(`INSERT INTO block (num) VALUES ($1)`, 10+uint64(i)) - require.NoError(t, err) - } - - // Insert block row for the ForwardLET event - _, err = tempTx.Exec(`INSERT INTO block (num) VALUES ($1)`, event.BlockNum) - require.NoError(t, err) - - // Insert archived bridges if provided - for _, archived := range archivedBridges { - if archived != nil { - _, err = tempTx.Exec(`INSERT INTO block (num) VALUES ($1)`, archived.BlockNum) - require.NoError(t, err) - - _, err = tempTx.Exec(` - INSERT INTO bridge_archive ( - block_num, block_pos, leaf_type, origin_network, origin_address, - destination_network, destination_address, amount, metadata, deposit_count, - tx_hash, block_timestamp, from_address, txn_sender - ) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, 0, $12, $13) - `, archived.BlockNum, archived.BlockPos, archived.LeafType, - archived.OriginNetwork, archived.OriginAddress.Hex(), - archived.DestinationNetwork, archived.DestinationAddress.Hex(), - archived.Amount.String(), archived.Metadata, archived.DepositCount, - archived.TxHash.Hex(), archived.FromAddress.Hex(), archived.TxnSender.Hex()) - require.NoError(t, err) - } - } - - // Rebuild tree state up to initialDepositCount - for i := uint32(0); i <= initialDepositCount; i++ { - leaf := types.Leaf{Index: i, Hash: common.HexToHash(fmt.Sprintf("0x%d", i))} - _, err = tempP.exitTree.PutLeaf(tempTx, 10+uint64(i), 0, leaf) - require.NoError(t, err) - } - - // Now add the ForwardLET leaves (will query for archived bridges) - currentDepositCount := initialDepositCount + 1 - var newRoot common.Hash - for i, leaf := range leaves { - // Try to get archived bridge info if available - var txHash 
common.Hash - var txnSender, fromAddr common.Address - if archived, found := archivedByLeaf[i]; found { - txHash = archived.TxHash - txnSender = archived.TxnSender - fromAddr = archived.FromAddress - } else { - txHash = event.TxnHash - // txnSender and fromAddr remain zero - } - - bridge := leaf.ToBridge( - event.BlockNum, - event.BlockPos+uint64(i), - event.BlockTimestamp, - currentDepositCount, - txHash, - txnSender, - fromAddr, - ) - newRoot, err = tempP.exitTree.PutLeaf(tempTx, event.BlockNum, event.BlockPos+uint64(i), types.Leaf{ - Index: currentDepositCount, - Hash: bridge.Hash(), - }) - require.NoError(t, err) - currentDepositCount++ - } - - return newRoot -} - -// encodeLeafDataArrayForTest encodes a slice of LeafData using ABI encoding -func encodeLeafDataArrayForTest(t *testing.T, leaves []LeafData) []byte { - t.Helper() - - encodedBytes, err := aggkitabi.EncodeABIStructArray(leaves) - require.NoError(t, err) - - return encodedBytes -} diff --git a/bridgesync/processor_test.go b/bridgesync/processor_test.go index 2b00b9783..c868c3fd8 100644 --- a/bridgesync/processor_test.go +++ b/bridgesync/processor_test.go @@ -19,10 +19,13 @@ import ( "time" "github.com/0xPolygon/cdk-contracts-tooling/contracts/aggchain-multisig/polygonzkevmbridge" + aggkitabi "github.com/agglayer/aggkit/abi" bridgetypes "github.com/agglayer/aggkit/bridgeservice/types" "github.com/agglayer/aggkit/bridgesync/migrations" bridgesynctypes "github.com/agglayer/aggkit/bridgesync/types" + aggkitcommon "github.com/agglayer/aggkit/common" "github.com/agglayer/aggkit/db" + dbtypes "github.com/agglayer/aggkit/db/types" "github.com/agglayer/aggkit/log" "github.com/agglayer/aggkit/sync" "github.com/agglayer/aggkit/tree/testvectors" @@ -5841,3 +5844,718 @@ func TestGetBoundaryBlock(t *testing.T) { }) } } + +func TestHandleForwardLETEvent(t *testing.T) { + t.Run("successfully process single leaf with no archived bridge", func(t *testing.T) { + p, tx := setupProcessorWithTransaction(t) + defer 
tx.Rollback() //nolint:errcheck + + // Setup: Insert initial leaves to establish previous root (indices 0-4) + var initialRoot common.Hash + var err error + // Insert block rows for initial leaves + for i := uint32(0); i <= 4; i++ { + _, err = tx.Exec(`INSERT INTO block (num) VALUES ($1)`, 10+uint64(i)) + require.NoError(t, err) + } + for i := uint32(0); i <= 4; i++ { + leaf := types.Leaf{Index: i, Hash: common.HexToHash(fmt.Sprintf("0x%d", i))} + initialRoot, err = p.exitTree.PutLeaf(tx, 10+uint64(i), 0, leaf) + require.NoError(t, err) + } + initialDepositCount := uint32(4) // Last index inserted + + // Insert block for the ForwardLET event + _, err = tx.Exec(`INSERT INTO block (num) VALUES ($1)`, uint64(100)) + require.NoError(t, err) + + // Create forward LET event with one leaf + leaves := []LeafData{ + { + LeafType: 1, + OriginNetwork: 5, + OriginAddress: common.HexToAddress("0x1234567890123456789012345678901234567890"), + DestinationNetwork: 10, + DestinationAddress: common.HexToAddress("0xabcdefabcdefabcdefabcdefabcdefabcdefabcd"), + Amount: big.NewInt(1000000), + Metadata: []byte("test metadata"), + }, + } + encodedLeaves := encodeLeafDataArrayForTest(t, leaves) + + event := &ForwardLET{ + BlockNum: 100, + BlockPos: 5, + BlockTimestamp: 1234567890, + TxnHash: common.HexToHash("0xabc123"), + PreviousDepositCount: big.NewInt(int64(initialDepositCount)), + PreviousRoot: initialRoot, + NewDepositCount: big.NewInt(int64(initialDepositCount + 1)), + NewLeaves: encodedLeaves, + } + + // Calculate the expected root that will result from processing these leaves + event.NewRoot = calculateExpectedRootAfterForwardLET(t, initialDepositCount, leaves, event) + + // Test: Process the forward LET event + blockPos := event.BlockPos + newBlockPos, err := p.handleForwardLETEvent(tx, event, &blockPos) + require.NoError(t, err) + require.Equal(t, event.BlockPos+1, newBlockPos) + + // Verify: Bridge was inserted + var bridges []*Bridge + err = meddler.QueryAll(tx, &bridges, 
"SELECT * FROM bridge WHERE block_num = $1", event.BlockNum) + require.NoError(t, err) + require.Len(t, bridges, 1) + + bridge := bridges[0] + require.Equal(t, event.BlockNum, bridge.BlockNum) + require.Equal(t, event.BlockPos, bridge.BlockPos) + require.Equal(t, leaves[0].LeafType, bridge.LeafType) + require.Equal(t, leaves[0].OriginNetwork, bridge.OriginNetwork) + require.Equal(t, leaves[0].OriginAddress, bridge.OriginAddress) + require.Equal(t, leaves[0].DestinationNetwork, bridge.DestinationNetwork) + require.Equal(t, leaves[0].DestinationAddress, bridge.DestinationAddress) + require.Equal(t, 0, leaves[0].Amount.Cmp(bridge.Amount)) + require.Equal(t, leaves[0].Metadata, bridge.Metadata) + require.Equal(t, initialDepositCount+1, bridge.DepositCount) + require.Equal(t, event.TxnHash, bridge.TxHash) + require.Equal(t, aggkitcommon.ZeroAddress, bridge.TxnSender) + require.Equal(t, aggkitcommon.ZeroAddress, bridge.FromAddress) + require.Equal(t, BridgeSourceForwardLET, bridge.Source) + + // Verify: ForwardLET event was inserted + var forwardLETs []*ForwardLET + err = meddler.QueryAll(tx, &forwardLETs, "SELECT * FROM forward_let WHERE block_num = $1", event.BlockNum) + require.NoError(t, err) + require.Len(t, forwardLETs, 1) + require.Equal(t, event.BlockNum, forwardLETs[0].BlockNum) + }) + + t.Run("successfully process multiple leaves", func(t *testing.T) { + p, tx := setupProcessorWithTransaction(t) + defer tx.Rollback() //nolint:errcheck + + // Setup: Insert initial leaves (indices 0-9) + var initialRoot common.Hash + var err error + // Insert block rows for initial leaves + for i := uint32(0); i <= 9; i++ { + _, err = tx.Exec(`INSERT INTO block (num) VALUES ($1)`, 20+uint64(i)) + require.NoError(t, err) + } + for i := uint32(0); i <= 9; i++ { + leaf := types.Leaf{Index: i, Hash: common.HexToHash(fmt.Sprintf("0x%d", i))} + initialRoot, err = p.exitTree.PutLeaf(tx, 20+uint64(i), 0, leaf) + require.NoError(t, err) + } + initialDepositCount := uint32(9) // Last index 
inserted + + // Insert block for the ForwardLET event + _, err = tx.Exec(`INSERT INTO block (num) VALUES ($1)`, uint64(200)) + require.NoError(t, err) + + // Create forward LET event with three leaves + leaves := []LeafData{ + { + LeafType: 0, + OriginNetwork: 1, + OriginAddress: common.HexToAddress("0x1111111111111111111111111111111111111111"), + DestinationNetwork: 2, + DestinationAddress: common.HexToAddress("0x2222222222222222222222222222222222222222"), + Amount: big.NewInt(100), + Metadata: []byte("first"), + }, + { + LeafType: 1, + OriginNetwork: 3, + OriginAddress: common.HexToAddress("0x3333333333333333333333333333333333333333"), + DestinationNetwork: 4, + DestinationAddress: common.HexToAddress("0x4444444444444444444444444444444444444444"), + Amount: big.NewInt(200), + Metadata: []byte("second"), + }, + { + LeafType: 2, + OriginNetwork: 5, + OriginAddress: common.HexToAddress("0x5555555555555555555555555555555555555555"), + DestinationNetwork: 6, + DestinationAddress: common.HexToAddress("0x6666666666666666666666666666666666666666"), + Amount: big.NewInt(300), + Metadata: []byte("third"), + }, + } + encodedLeaves := encodeLeafDataArrayForTest(t, leaves) + + event := &ForwardLET{ + BlockNum: 200, + BlockPos: 10, + BlockTimestamp: 1234567900, + TxnHash: common.HexToHash("0xdef456"), + PreviousDepositCount: big.NewInt(int64(initialDepositCount)), + PreviousRoot: initialRoot, + NewDepositCount: big.NewInt(int64(initialDepositCount + uint32(len(leaves)))), + NewLeaves: encodedLeaves, + } + + // Calculate the expected root that will result from processing these leaves + event.NewRoot = calculateExpectedRootAfterForwardLET(t, initialDepositCount, leaves, event) + + // Test: Process the forward LET event + blockPos := event.BlockPos + newBlockPos, err := p.handleForwardLETEvent(tx, event, &blockPos) + require.NoError(t, err) + require.Equal(t, event.BlockPos+uint64(len(leaves)), newBlockPos) + + // Verify: All bridges were inserted + var bridges []*Bridge + err = 
meddler.QueryAll(tx, &bridges, "SELECT * FROM bridge WHERE block_num = $1 ORDER BY block_pos", event.BlockNum) + require.NoError(t, err) + require.Len(t, bridges, 3) + + // Verify each bridge + for i, bridge := range bridges { + require.Equal(t, event.BlockNum, bridge.BlockNum) + require.Equal(t, event.BlockPos+uint64(i), bridge.BlockPos) + require.Equal(t, leaves[i].LeafType, bridge.LeafType) + require.Equal(t, leaves[i].OriginNetwork, bridge.OriginNetwork) + require.Equal(t, initialDepositCount+uint32(i)+1, bridge.DepositCount) + require.Equal(t, BridgeSourceForwardLET, bridge.Source) + } + }) + + t.Run("process leaf with matching archived bridge", func(t *testing.T) { + p, tx := setupProcessorWithTransaction(t) + defer tx.Rollback() //nolint:errcheck + + // Setup: Insert initial leaves (indices 0-14) + var initialRoot common.Hash + var err error + // Insert block rows for initial leaves + for i := uint32(0); i <= 14; i++ { + _, err = tx.Exec(`INSERT INTO block (num) VALUES ($1)`, 30+uint64(i)) + require.NoError(t, err) + } + for i := uint32(0); i <= 14; i++ { + leaf := types.Leaf{Index: i, Hash: common.HexToHash(fmt.Sprintf("0x%d", i))} + initialRoot, err = p.exitTree.PutLeaf(tx, 30+uint64(i), 0, leaf) + require.NoError(t, err) + } + initialDepositCount := uint32(14) // Last index inserted + + // Insert blocks for the archived bridge and ForwardLET event + _, err = tx.Exec(`INSERT INTO block (num) VALUES ($1), ($2)`, uint64(50), uint64(300)) + require.NoError(t, err) + + // Setup: Create and archive a bridge that will match the forward LET leaf + archivedTxHash := common.HexToHash("0xoriginal123") + archivedTxnSender := common.HexToAddress("0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa") + archivedFromAddr := common.HexToAddress("0xbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb") + + archivedBridge := &Bridge{ + BlockNum: 50, + BlockPos: 0, + LeafType: 1, + OriginNetwork: 7, + OriginAddress: common.HexToAddress("0x7777777777777777777777777777777777777777"), + 
DestinationNetwork: 8, + DestinationAddress: common.HexToAddress("0x8888888888888888888888888888888888888888"), + Amount: big.NewInt(500000), + Metadata: []byte("archived metadata"), + DepositCount: 20, + TxHash: archivedTxHash, + TxnSender: archivedTxnSender, + FromAddress: archivedFromAddr, + // Don't set Source - bridge_archive table doesn't have this column + } + // Insert manually to avoid Source field + err = meddler.Insert(tx, "bridge_archive", archivedBridge) + require.NoError(t, err) + + // Create forward LET event with matching leaf + leaves := []LeafData{ + { + LeafType: archivedBridge.LeafType, + OriginNetwork: archivedBridge.OriginNetwork, + OriginAddress: archivedBridge.OriginAddress, + DestinationNetwork: archivedBridge.DestinationNetwork, + DestinationAddress: archivedBridge.DestinationAddress, + Amount: archivedBridge.Amount, + Metadata: archivedBridge.Metadata, + }, + } + encodedLeaves := encodeLeafDataArrayForTest(t, leaves) + + event := &ForwardLET{ + BlockNum: 300, + BlockPos: 20, + BlockTimestamp: 1234567950, + TxnHash: common.HexToHash("0xforward789"), + PreviousDepositCount: big.NewInt(int64(initialDepositCount)), + PreviousRoot: initialRoot, + NewDepositCount: big.NewInt(int64(initialDepositCount + 1)), + NewLeaves: encodedLeaves, + } + + // Calculate expected new root using helper (which will query for archived bridge) + event.NewRoot = calculateExpectedRootAfterForwardLET(t, initialDepositCount, leaves, event, archivedBridge) + + // Test: Process the forward LET event + blockPos := event.BlockPos + newBlockPos, err := p.handleForwardLETEvent(tx, event, &blockPos) + require.NoError(t, err) + require.Equal(t, event.BlockPos+1, newBlockPos) + + // Verify: Bridge was inserted with archived tx info + var bridges []*Bridge + err = meddler.QueryAll(tx, &bridges, "SELECT * FROM bridge WHERE block_num = $1", event.BlockNum) + require.NoError(t, err) + require.Len(t, bridges, 1) + + bridge := bridges[0] + require.Equal(t, archivedTxHash, 
bridge.TxHash, "Should use archived tx hash") + require.Equal(t, archivedTxnSender, bridge.TxnSender, "Should use archived txn sender") + require.Equal(t, archivedFromAddr, bridge.FromAddress, "Should use archived from address") + require.Equal(t, BridgeSourceForwardLET, bridge.Source) + }) + + t.Run("process leaf with multiple matching archived bridges", func(t *testing.T) { + p, tx := setupProcessorWithTransaction(t) + defer tx.Rollback() //nolint:errcheck + + // Setup: Insert initial leaves (indices 0-24) + var initialRoot common.Hash + var err error + // Insert block rows for initial leaves + for i := uint32(0); i <= 24; i++ { + _, err = tx.Exec(`INSERT INTO block (num) VALUES ($1)`, 40+uint64(i)) + require.NoError(t, err) + } + for i := uint32(0); i <= 24; i++ { + leaf := types.Leaf{Index: i, Hash: common.HexToHash(fmt.Sprintf("0x%d", i))} + initialRoot, err = p.exitTree.PutLeaf(tx, 40+uint64(i), 0, leaf) + require.NoError(t, err) + } + initialDepositCount := uint32(24) // Last index inserted + + // Insert blocks for archived bridges (60, 61 already exist from initial leaves) and ForwardLET event + _, err = tx.Exec(`INSERT INTO block (num) VALUES ($1)`, uint64(400)) + require.NoError(t, err) + + // Setup: Create two archived bridges with identical LeafData fields + commonLeafData := LeafData{ + LeafType: 1, + OriginNetwork: 9, + OriginAddress: common.HexToAddress("0x9999999999999999999999999999999999999999"), + DestinationNetwork: 11, + DestinationAddress: common.HexToAddress("0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"), + Amount: big.NewInt(750000), + Metadata: []byte("duplicate metadata"), + } + + archivedBridge1 := &Bridge{ + BlockNum: 60, + BlockPos: 0, + LeafType: commonLeafData.LeafType, + OriginNetwork: commonLeafData.OriginNetwork, + OriginAddress: commonLeafData.OriginAddress, + DestinationNetwork: commonLeafData.DestinationNetwork, + DestinationAddress: commonLeafData.DestinationAddress, + Amount: commonLeafData.Amount, + Metadata: 
commonLeafData.Metadata, + DepositCount: 30, + TxHash: common.HexToHash("0xfirst111"), + TxnSender: common.HexToAddress("0x1111111111111111111111111111111111111111"), + FromAddress: common.HexToAddress("0x2222222222222222222222222222222222222222"), + } + + archivedBridge2 := &Bridge{ + BlockNum: 61, + BlockPos: 0, + LeafType: commonLeafData.LeafType, + OriginNetwork: commonLeafData.OriginNetwork, + OriginAddress: commonLeafData.OriginAddress, + DestinationNetwork: commonLeafData.DestinationNetwork, + DestinationAddress: commonLeafData.DestinationAddress, + Amount: commonLeafData.Amount, + Metadata: commonLeafData.Metadata, + DepositCount: 31, + TxHash: common.HexToHash("0xsecond222"), + TxnSender: common.HexToAddress("0x3333333333333333333333333333333333333333"), + FromAddress: common.HexToAddress("0x4444444444444444444444444444444444444444"), + } + + // Insert both archived bridges manually (to avoid Source column) + for _, archived := range []*Bridge{archivedBridge1, archivedBridge2} { + err = meddler.Insert(tx, "bridge_archive", archived) + require.NoError(t, err) + } + + // Create forward LET event with the common leaf + leaves := []LeafData{commonLeafData} + encodedLeaves := encodeLeafDataArrayForTest(t, leaves) + + event := &ForwardLET{ + BlockNum: 400, + BlockPos: 30, + BlockTimestamp: 1234567999, + TxnHash: common.HexToHash("0xforward999"), + PreviousDepositCount: big.NewInt(int64(initialDepositCount)), + PreviousRoot: initialRoot, + NewDepositCount: big.NewInt(int64(initialDepositCount + 1)), + NewLeaves: encodedLeaves, + } + + // Calculate expected new root using helper (with no archived bridge info since multiple matches) + event.NewRoot = calculateExpectedRootAfterForwardLET(t, initialDepositCount, leaves, event) + + // Test: Process the forward LET event + blockPos := event.BlockPos + newBlockPos, err := p.handleForwardLETEvent(tx, event, &blockPos) + require.NoError(t, err) + require.Equal(t, event.BlockPos+1, newBlockPos) + + // Verify: Bridge was 
inserted with event's tx hash and empty addresses + var bridges []*Bridge + err = meddler.QueryAll(tx, &bridges, "SELECT * FROM bridge WHERE block_num = $1", event.BlockNum) + require.NoError(t, err) + require.Len(t, bridges, 1) + + bridge := bridges[0] + require.Equal(t, event.TxnHash, bridge.TxHash, "Should use event's tx hash when multiple archived bridges match") + require.Equal(t, common.Address{}, bridge.TxnSender, "TxnSender should be empty with multiple matches") + require.Equal(t, common.Address{}, bridge.FromAddress, "FromAddress should be empty with multiple matches") + require.Equal(t, BridgeSourceForwardLET, bridge.Source) + }) + + t.Run("error on previous root mismatch", func(t *testing.T) { + p, tx := setupProcessorWithTransaction(t) + defer tx.Rollback() //nolint:errcheck + + // Setup: Insert initial leaves (indices 0-4) + var initialRoot common.Hash + var err error + for i := uint32(0); i <= 4; i++ { + leaf := types.Leaf{Index: i, Hash: common.HexToHash(fmt.Sprintf("0x%d", i))} + initialRoot, err = p.exitTree.PutLeaf(tx, 10+uint64(i), 0, leaf) + require.NoError(t, err) + } + initialDepositCount := uint32(4) // Last index inserted + + // Create forward LET event with WRONG previous root + leaves := []LeafData{ + { + LeafType: 1, + OriginNetwork: 5, + OriginAddress: common.HexToAddress("0x1234567890123456789012345678901234567890"), + DestinationNetwork: 10, + DestinationAddress: common.HexToAddress("0xabcdefabcdefabcdefabcdefabcdefabcdefabcd"), + Amount: big.NewInt(1000000), + Metadata: []byte("test"), + }, + } + encodedLeaves := encodeLeafDataArrayForTest(t, leaves) + + event := &ForwardLET{ + BlockNum: 100, + BlockPos: 5, + BlockTimestamp: 1234567890, + TxnHash: common.HexToHash("0xabc123"), + PreviousDepositCount: big.NewInt(int64(initialDepositCount)), + PreviousRoot: common.HexToHash("0xWRONG"), // Wrong root + NewDepositCount: big.NewInt(int64(initialDepositCount + 1)), + NewRoot: common.HexToHash("0x999"), + NewLeaves: encodedLeaves, + } + + 
// Test: Should fail with root mismatch + blockPos := event.BlockPos + _, err = p.handleForwardLETEvent(tx, event, &blockPos) + require.Error(t, err) + require.Contains(t, err.Error(), "local exit root mismatch") + require.Contains(t, err.Error(), initialRoot.String()) + }) + + t.Run("error on new root mismatch", func(t *testing.T) { + p, tx := setupProcessorWithTransaction(t) + defer tx.Rollback() //nolint:errcheck + + // Setup: Insert initial leaves (indices 0-4) + var initialRoot common.Hash + var err error + // Insert block rows for initial leaves + for i := uint32(0); i <= 4; i++ { + _, err = tx.Exec(`INSERT INTO block (num) VALUES ($1)`, 10+uint64(i)) + require.NoError(t, err) + } + for i := uint32(0); i <= 4; i++ { + leaf := types.Leaf{Index: i, Hash: common.HexToHash(fmt.Sprintf("0x%d", i))} + initialRoot, err = p.exitTree.PutLeaf(tx, 10+uint64(i), 0, leaf) + require.NoError(t, err) + } + initialDepositCount := uint32(4) // Last index inserted + + // Insert block for the ForwardLET event + _, err = tx.Exec(`INSERT INTO block (num) VALUES ($1)`, uint64(100)) + require.NoError(t, err) + + // Create forward LET event + leaves := []LeafData{ + { + LeafType: 1, + OriginNetwork: 5, + OriginAddress: common.HexToAddress("0x1234567890123456789012345678901234567890"), + DestinationNetwork: 10, + DestinationAddress: common.HexToAddress("0xabcdefabcdefabcdefabcdefabcdefabcdefabcd"), + Amount: big.NewInt(1000000), + Metadata: []byte("test"), + }, + } + encodedLeaves := encodeLeafDataArrayForTest(t, leaves) + + event := &ForwardLET{ + BlockNum: 100, + BlockPos: 5, + BlockTimestamp: 1234567890, + TxnHash: common.HexToHash("0xabc123"), + PreviousDepositCount: big.NewInt(int64(initialDepositCount)), + PreviousRoot: initialRoot, + NewDepositCount: big.NewInt(int64(initialDepositCount + 1)), + NewRoot: common.HexToHash("0xWRONG"), // Wrong new root + NewLeaves: encodedLeaves, + } + + // Test: Should fail with new root mismatch after processing + blockPos := event.BlockPos + 
_, err = p.handleForwardLETEvent(tx, event, &blockPos) + require.Error(t, err) + require.Contains(t, err.Error(), "local exit root mismatch") + }) + + t.Run("error on invalid encoded leaves", func(t *testing.T) { + p, tx := setupProcessorWithTransaction(t) + defer tx.Rollback() //nolint:errcheck + + // Setup: Insert initial leaves (indices 0-4) + var initialRoot common.Hash + var err error + for i := uint32(0); i <= 4; i++ { + leaf := types.Leaf{Index: i, Hash: common.HexToHash(fmt.Sprintf("0x%d", i))} + initialRoot, err = p.exitTree.PutLeaf(tx, 10+uint64(i), 0, leaf) + require.NoError(t, err) + } + initialDepositCount := uint32(4) // Last index inserted + + event := &ForwardLET{ + BlockNum: 100, + BlockPos: 5, + BlockTimestamp: 1234567890, + TxnHash: common.HexToHash("0xabc123"), + PreviousDepositCount: big.NewInt(int64(initialDepositCount)), + PreviousRoot: initialRoot, + NewDepositCount: big.NewInt(int64(initialDepositCount + 1)), + NewRoot: common.Hash{}, + NewLeaves: []byte("invalid data"), // Invalid encoding + } + + // Test: Should fail to decode leaves + blockPos := event.BlockPos + _, err = p.handleForwardLETEvent(tx, event, &blockPos) + require.Error(t, err) + require.Contains(t, err.Error(), "failed to decode new leaves") + }) + + t.Run("process with nil blockPos parameter", func(t *testing.T) { + p, tx := setupProcessorWithTransaction(t) + defer tx.Rollback() //nolint:errcheck + + // Setup: Insert initial leaves (indices 0-4) + var initialRoot common.Hash + var err error + // Insert block rows for initial leaves + for i := uint32(0); i <= 4; i++ { + _, err = tx.Exec(`INSERT INTO block (num) VALUES ($1)`, 10+uint64(i)) + require.NoError(t, err) + } + for i := uint32(0); i <= 4; i++ { + leaf := types.Leaf{Index: i, Hash: common.HexToHash(fmt.Sprintf("0x%d", i))} + initialRoot, err = p.exitTree.PutLeaf(tx, 10+uint64(i), 0, leaf) + require.NoError(t, err) + } + initialDepositCount := uint32(4) // Last index inserted + + // Insert block for the ForwardLET 
event + _, err = tx.Exec(`INSERT INTO block (num) VALUES ($1)`, uint64(100)) + require.NoError(t, err) + + leaves := []LeafData{ + { + LeafType: 1, + OriginNetwork: 5, + OriginAddress: common.HexToAddress("0x1234567890123456789012345678901234567890"), + DestinationNetwork: 10, + DestinationAddress: common.HexToAddress("0xabcdefabcdefabcdefabcdefabcdefabcdefabcd"), + Amount: big.NewInt(1000000), + Metadata: []byte("test"), + }, + } + encodedLeaves := encodeLeafDataArrayForTest(t, leaves) + + event := &ForwardLET{ + BlockNum: 100, + BlockPos: 5, + BlockTimestamp: 1234567890, + TxnHash: common.HexToHash("0xabc123"), + PreviousDepositCount: big.NewInt(int64(initialDepositCount)), + PreviousRoot: initialRoot, + NewDepositCount: big.NewInt(int64(initialDepositCount + 1)), + NewLeaves: encodedLeaves, + } + + // Calculate expected root using helper + event.NewRoot = calculateExpectedRootAfterForwardLET(t, initialDepositCount, leaves, event) + + // Test: Process with nil blockPos (should use event.BlockPos) + newBlockPos, err := p.handleForwardLETEvent(tx, event, nil) + require.NoError(t, err) + require.Equal(t, event.BlockPos+1, newBlockPos) + + // Verify: Bridge uses event.BlockPos + var bridges []*Bridge + err = meddler.QueryAll(tx, &bridges, "SELECT * FROM bridge WHERE block_num = $1", event.BlockNum) + require.NoError(t, err) + require.Len(t, bridges, 1) + require.Equal(t, event.BlockPos, bridges[0].BlockPos) + }) +} + +// setupProcessorWithTransaction creates a processor and begins a transaction for testing +func setupProcessorWithTransaction(t *testing.T) (*processor, dbtypes.Txer) { + t.Helper() + + dbPath := filepath.Join(t.TempDir(), "test_forward_let.db") + err := migrations.RunMigrations(dbPath) + require.NoError(t, err) + + logger := log.WithFields("module", "test") + p, err := newProcessor(dbPath, "test", logger, dbQueryTimeout) + require.NoError(t, err) + + tx, err := db.NewTx(t.Context(), p.db) + require.NoError(t, err) + + return p, tx +} + +// 
calculateExpectedRootAfterForwardLET calculates what the tree root will be after processing ForwardLET leaves +// It does this using a completely separate processor to avoid affecting the test state +// archivedBridges: optional map from leaf index (in leaves slice) to archived bridge info +func calculateExpectedRootAfterForwardLET(t *testing.T, initialDepositCount uint32, + leaves []LeafData, event *ForwardLET, archivedBridges ...*Bridge) common.Hash { + t.Helper() + + // Build a map for quick lookup of archived bridge info by leaf data + archivedByLeaf := make(map[int]*Bridge) + for i, archived := range archivedBridges { + if archived != nil { + archivedByLeaf[i] = archived + } + } + + // Create a temporary processor with its own database + tempDBPath := filepath.Join(t.TempDir(), "temp_calc.db") + err := migrations.RunMigrations(tempDBPath) + require.NoError(t, err) + + logger := log.WithFields("module", "test-calc") + tempP, err := newProcessor(tempDBPath, "test-calc", logger, dbQueryTimeout) + require.NoError(t, err) + + tempTx, err := db.NewTx(t.Context(), tempP.db) + require.NoError(t, err) + defer tempTx.Rollback() //nolint:errcheck + + // Insert block rows for the setup leaves + for i := uint32(0); i <= initialDepositCount; i++ { + _, err = tempTx.Exec(`INSERT INTO block (num) VALUES ($1)`, 10+uint64(i)) + require.NoError(t, err) + } + + // Insert block row for the ForwardLET event + _, err = tempTx.Exec(`INSERT INTO block (num) VALUES ($1)`, event.BlockNum) + require.NoError(t, err) + + // Insert archived bridges if provided + for _, archived := range archivedBridges { + if archived != nil { + _, err = tempTx.Exec(`INSERT INTO block (num) VALUES ($1)`, archived.BlockNum) + require.NoError(t, err) + + _, err = tempTx.Exec(` + INSERT INTO bridge_archive ( + block_num, block_pos, leaf_type, origin_network, origin_address, + destination_network, destination_address, amount, metadata, deposit_count, + tx_hash, block_timestamp, from_address, txn_sender + ) 
VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, 0, $12, $13) + `, archived.BlockNum, archived.BlockPos, archived.LeafType, + archived.OriginNetwork, archived.OriginAddress.Hex(), + archived.DestinationNetwork, archived.DestinationAddress.Hex(), + archived.Amount.String(), archived.Metadata, archived.DepositCount, + archived.TxHash.Hex(), archived.FromAddress.Hex(), archived.TxnSender.Hex()) + require.NoError(t, err) + } + } + + // Rebuild tree state up to initialDepositCount + for i := uint32(0); i <= initialDepositCount; i++ { + leaf := types.Leaf{Index: i, Hash: common.HexToHash(fmt.Sprintf("0x%d", i))} + _, err = tempP.exitTree.PutLeaf(tempTx, 10+uint64(i), 0, leaf) + require.NoError(t, err) + } + + // Now add the ForwardLET leaves (will query for archived bridges) + currentDepositCount := initialDepositCount + 1 + var newRoot common.Hash + for i, leaf := range leaves { + // Try to get archived bridge info if available + var txHash common.Hash + var txnSender, fromAddr common.Address + if archived, found := archivedByLeaf[i]; found { + txHash = archived.TxHash + txnSender = archived.TxnSender + fromAddr = archived.FromAddress + } else { + txHash = event.TxnHash + // txnSender and fromAddr remain zero + } + + bridge := leaf.ToBridge( + event.BlockNum, + event.BlockPos+uint64(i), + event.BlockTimestamp, + currentDepositCount, + txHash, + txnSender, + fromAddr, + ) + newRoot, err = tempP.exitTree.PutLeaf(tempTx, event.BlockNum, event.BlockPos+uint64(i), types.Leaf{ + Index: currentDepositCount, + Hash: bridge.Hash(), + }) + require.NoError(t, err) + currentDepositCount++ + } + + return newRoot +} + +// encodeLeafDataArrayForTest encodes a slice of LeafData using ABI encoding +func encodeLeafDataArrayForTest(t *testing.T, leaves []LeafData) []byte { + t.Helper() + + encodedBytes, err := aggkitabi.EncodeABIStructArray(leaves) + require.NoError(t, err) + + return encodedBytes +} From f86ed29bae59401b8ef06ce718cba95355dbe525 Mon Sep 17 00:00:00 2001 From: Goran 
Rojovic Date: Fri, 26 Dec 2025 11:26:20 +0100 Subject: [PATCH 51/73] fix: lint --- abi/abi_decode.go | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/abi/abi_decode.go b/abi/abi_decode.go index 17447d118..1c6bd4752 100644 --- a/abi/abi_decode.go +++ b/abi/abi_decode.go @@ -38,6 +38,10 @@ func DecodeABIEncodedStructArray[T any](encodedBytes []byte) ([]T, error) { return nil, errors.New("unpacked data is empty") } - decodedData := *abi.ConvertType(unpacked[0], new([]T)).(*[]T) - return decodedData, nil + decodedData, ok := abi.ConvertType(unpacked[0], new([]T)).(*[]T) + if !ok { + return nil, errors.New("failed to convert unpacked data to the expected type") + } + + return *decodedData, nil } From 4bbb46ef7173ec59f7cd3e60d3d4151e0a6d803d Mon Sep 17 00:00:00 2001 From: Goran Rojovic Date: Fri, 26 Dec 2025 11:39:16 +0100 Subject: [PATCH 52/73] feat: new UTs --- abi/abi_builder_test.go | 156 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 156 insertions(+) diff --git a/abi/abi_builder_test.go b/abi/abi_builder_test.go index 464a70e4c..f03343a7c 100644 --- a/abi/abi_builder_test.go +++ b/abi/abi_builder_test.go @@ -2,6 +2,7 @@ package abi import ( "math/big" + "reflect" "testing" "github.com/ethereum/go-ethereum/accounts/abi" @@ -69,3 +70,158 @@ func TestBuildABIFields_WithPointer(t *testing.T) { require.Equal(t, expected, fields) } + +func TestInferABIType(t *testing.T) { + tests := []struct { + name string + input reflect.Type + expected string + wantErr bool + }{ + // Special types + { + name: "common.Address", + input: reflect.TypeOf(common.Address{}), + expected: "address", + wantErr: false, + }, + { + name: "big.Int pointer", + input: reflect.TypeOf(&big.Int{}), + expected: "uint256", + wantErr: false, + }, + { + name: "big.Int value", + input: reflect.TypeOf(big.Int{}), + expected: "uint256", + wantErr: false, + }, + { + name: "common.Hash", + input: reflect.TypeOf(common.Hash{}), + expected: "bytes32", + wantErr: false, + }, + // 
Unsigned integers + { + name: "uint8", + input: reflect.TypeOf(uint8(0)), + expected: "uint8", + wantErr: false, + }, + { + name: "uint16", + input: reflect.TypeOf(uint16(0)), + expected: "uint16", + wantErr: false, + }, + { + name: "uint32", + input: reflect.TypeOf(uint32(0)), + expected: "uint32", + wantErr: false, + }, + { + name: "uint64", + input: reflect.TypeOf(uint64(0)), + expected: "uint64", + wantErr: false, + }, + // Signed integers + { + name: "int8", + input: reflect.TypeOf(int8(0)), + expected: "int8", + wantErr: false, + }, + { + name: "int16", + input: reflect.TypeOf(int16(0)), + expected: "int16", + wantErr: false, + }, + { + name: "int32", + input: reflect.TypeOf(int32(0)), + expected: "int32", + wantErr: false, + }, + { + name: "int64", + input: reflect.TypeOf(int64(0)), + expected: "int64", + wantErr: false, + }, + // Other basic types + { + name: "bool", + input: reflect.TypeOf(true), + expected: "bool", + wantErr: false, + }, + { + name: "string", + input: reflect.TypeOf(""), + expected: "string", + wantErr: false, + }, + // Slice and array types + { + name: "byte slice", + input: reflect.TypeOf([]byte{}), + expected: "bytes", + wantErr: false, + }, + { + name: "byte array", + input: reflect.TypeOf([32]byte{}), + expected: "bytes32", + wantErr: false, + }, + { + name: "byte array different size", + input: reflect.TypeOf([20]byte{}), + expected: "bytes20", + wantErr: false, + }, + // Error cases + { + name: "unsupported slice type", + input: reflect.TypeOf([]int{}), + expected: "", + wantErr: true, + }, + { + name: "unsupported array type", + input: reflect.TypeOf([5]int{}), + expected: "", + wantErr: true, + }, + { + name: "unsupported type map", + input: reflect.TypeOf(map[string]string{}), + expected: "", + wantErr: true, + }, + { + name: "unsupported type struct", + input: reflect.TypeOf(struct{}{}), + expected: "", + wantErr: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result, err := 
inferABIType(tt.input) + if tt.wantErr { + require.Error(t, err) + require.Contains(t, err.Error(), "unsupported") + } else { + require.NoError(t, err) + require.Equal(t, tt.expected, result) + } + }) + } +} From d314b704ad984fce25ed495ed1133450478181b5 Mon Sep 17 00:00:00 2001 From: Goran Rojovic Date: Wed, 31 Dec 2025 09:18:37 +0100 Subject: [PATCH 53/73] fix: rebase --- bridgesync/downloader_test.go | 1 + 1 file changed, 1 insertion(+) diff --git a/bridgesync/downloader_test.go b/bridgesync/downloader_test.go index f1d56db4b..a44c0ed3b 100644 --- a/bridgesync/downloader_test.go +++ b/bridgesync/downloader_test.go @@ -666,6 +666,7 @@ func TestBuildAppender(t *testing.T) { name: "forwardLETSignature appender", eventSignature: forwardLETEventSignature, deploymentKind: SovereignChain, + logsCount: 1, logBuilder: func() (types.Log, error) { event, err := bridgeL2Abi.EventByID(forwardLETEventSignature) if err != nil { From dd356828c9ad8f9114cfb4051f540e6869725571 Mon Sep 17 00:00:00 2001 From: Arpit Temani Date: Wed, 7 Jan 2026 14:20:29 +0530 Subject: [PATCH 54/73] fix run local --- .github/test_e2e_op_args_base.json | 2 +- test/run-local-e2e.sh | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/test_e2e_op_args_base.json b/.github/test_e2e_op_args_base.json index 05428c1b6..2b15a539e 100644 --- a/.github/test_e2e_op_args_base.json +++ b/.github/test_e2e_op_args_base.json @@ -1,7 +1,7 @@ { "deployment_stages": { "deploy_op_succinct": false, - "deploy_cdk_bridge_infra": false + "deploy_cdk_bridge_infra": true }, "args": { "aggkit_image": "aggkit:local", diff --git a/test/run-local-e2e.sh b/test/run-local-e2e.sh index c6ddceebb..e2ebc7a37 100755 --- a/test/run-local-e2e.sh +++ b/test/run-local-e2e.sh @@ -68,7 +68,7 @@ while [[ $# -gt 0 ]]; do echo " $0 single-l2-network-op-succinct /path/to/kurtosis - # Use Kurtosis, skip E2E" exit 0 ;; - -*) + -?*) echo "Unknown option: $1" echo "Use -h or --help for usage information" exit 1 From 
360f5ac3408568f0da80649210aa3a3d6a84c1d0 Mon Sep 17 00:00:00 2001 From: Arpit Temani Date: Wed, 7 Jan 2026 15:12:59 +0530 Subject: [PATCH 55/73] update ref --- .github/workflows/test-e2e.yml | 22 +++++++++++----------- test/run-local-e2e.sh | 16 +++++++--------- 2 files changed, 18 insertions(+), 20 deletions(-) diff --git a/.github/workflows/test-e2e.yml b/.github/workflows/test-e2e.yml index cdc03345c..48903062a 100644 --- a/.github/workflows/test-e2e.yml +++ b/.github/workflows/test-e2e.yml @@ -18,7 +18,7 @@ concurrency: env: AGGKIT_REPORT_CHANNEL: "C092K6Z0EUT" - KURTOSIS_CDK_COMMIT: "8ad1a72ccf5212219cabc67e0d7f542658295185" + KURTOSIS_CDK_COMMIT: "51368c55832e21be2bb6f241e22cdbb0e6bc99fe" permissions: packages: write @@ -173,11 +173,11 @@ jobs: - build-tools - read-aggkit-args - get-kurtosis-cdk-commit - uses: agglayer/e2e/.github/workflows/aggkit-e2e-single-chain.yml@76b1ff29e2ab38697aa2fb8d51fd8563bca2bb5f + uses: agglayer/e2e/.github/workflows/aggkit-e2e-single-chain.yml@1d7411eb2fd48c09e24691b5081c9997e902936a secrets: inherit with: kurtosis-cdk-ref: ${{ needs.get-kurtosis-cdk-commit.outputs.kurtosis-commit }} - agglayer-e2e-ref: 76b1ff29e2ab38697aa2fb8d51fd8563bca2bb5f # main + agglayer-e2e-ref: 1d7411eb2fd48c09e24691b5081c9997e902936a kurtosis-cdk-enclave-name: op kurtosis-cdk-args: ${{ needs.read-aggkit-args.outputs.kurtosis-cdk-args-single-op-pessimistic }} test-name: "test-single-l2-network-op-pessimistic" @@ -201,11 +201,11 @@ jobs: - build-tools - read-aggkit-args - get-kurtosis-cdk-commit - uses: agglayer/e2e/.github/workflows/aggkit-e2e-single-chain.yml@76b1ff29e2ab38697aa2fb8d51fd8563bca2bb5f + uses: agglayer/e2e/.github/workflows/aggkit-e2e-single-chain.yml@1d7411eb2fd48c09e24691b5081c9997e902936a secrets: inherit with: kurtosis-cdk-ref: ${{ needs.get-kurtosis-cdk-commit.outputs.kurtosis-commit }} - agglayer-e2e-ref: 76b1ff29e2ab38697aa2fb8d51fd8563bca2bb5f # main + agglayer-e2e-ref: 1d7411eb2fd48c09e24691b5081c9997e902936a 
kurtosis-cdk-enclave-name: op kurtosis-cdk-args: ${{ needs.read-aggkit-args.outputs.kurtosis-cdk-args-single-op-succinct }} aggsender-find-imported-bridge-artifact: aggsender_find_imported_bridge @@ -229,12 +229,12 @@ jobs: - build-tools - read-aggkit-args - get-kurtosis-cdk-commit - uses: agglayer/e2e/.github/workflows/aggkit-e2e-single-chain.yml@76b1ff29e2ab38697aa2fb8d51fd8563bca2bb5f + uses: agglayer/e2e/.github/workflows/aggkit-e2e-single-chain.yml@1d7411eb2fd48c09e24691b5081c9997e902936a if: always() && github.event_name == 'schedule' && github.ref == 'refs/heads/develop' secrets: inherit with: kurtosis-cdk-ref: ${{ needs.get-kurtosis-cdk-commit.outputs.kurtosis-commit }} - agglayer-e2e-ref: 76b1ff29e2ab38697aa2fb8d51fd8563bca2bb5f # main + agglayer-e2e-ref: 1d7411eb2fd48c09e24691b5081c9997e902936a kurtosis-cdk-enclave-name: op kurtosis-cdk-args: ${{ needs.read-aggkit-args.outputs.kurtosis-cdk-args-single-op-succinct-aggoracle-committee }} test-name: "test-single-l2-network-op-succinct-aggoracle-committee" @@ -258,11 +258,11 @@ jobs: - build-tools - read-aggkit-args - get-kurtosis-cdk-commit - uses: agglayer/e2e/.github/workflows/aggkit-e2e-multi-chains.yml@76b1ff29e2ab38697aa2fb8d51fd8563bca2bb5f + uses: agglayer/e2e/.github/workflows/aggkit-e2e-multi-chains.yml@1d7411eb2fd48c09e24691b5081c9997e902936a secrets: inherit with: kurtosis-cdk-ref: ${{ needs.get-kurtosis-cdk-commit.outputs.kurtosis-commit }} - agglayer-e2e-ref: 76b1ff29e2ab38697aa2fb8d51fd8563bca2bb5f # main + agglayer-e2e-ref: 1d7411eb2fd48c09e24691b5081c9997e902936a kurtosis-cdk-enclave-name: op aggsender-find-imported-bridge-artifact: aggsender_find_imported_bridge kurtosis-cdk-args-1: ${{ needs.read-aggkit-args.outputs.kurtosis-cdk-args-1 }} @@ -284,11 +284,11 @@ jobs: - build-tools - read-aggkit-args - get-kurtosis-cdk-commit - uses: agglayer/e2e/.github/workflows/aggkit-e2e-multi-chains.yml@76b1ff29e2ab38697aa2fb8d51fd8563bca2bb5f + uses: 
agglayer/e2e/.github/workflows/aggkit-e2e-multi-chains.yml@1d7411eb2fd48c09e24691b5081c9997e902936a secrets: inherit with: kurtosis-cdk-ref: ${{ needs.get-kurtosis-cdk-commit.outputs.kurtosis-commit }} - agglayer-e2e-ref: 76b1ff29e2ab38697aa2fb8d51fd8563bca2bb5f # main + agglayer-e2e-ref: 1d7411eb2fd48c09e24691b5081c9997e902936a kurtosis-cdk-enclave-name: aggkit aggsender-find-imported-bridge-artifact: aggsender_find_imported_bridge kurtosis-cdk-args-1: ${{ needs.read-aggkit-args.outputs.kurtosis-cdk-args-3 }} diff --git a/test/run-local-e2e.sh b/test/run-local-e2e.sh index e2ebc7a37..0d1979df2 100755 --- a/test/run-local-e2e.sh +++ b/test/run-local-e2e.sh @@ -394,7 +394,6 @@ if [ "$E2E_REPO_PATH" != "-" ]; then case "$TEST_TYPE" in single-l2-network-op-succinct) bats ./tests/aggkit/bridge-e2e.bats || exit 1 - bats ./tests/aggkit/e2e-pp.bats || exit 1 bats ./tests/aggkit/bridge-sovereign-chain-e2e.bats || exit 1 bats ./tests/aggkit/bridge-e2e-nightly.bats || exit 1 bats ./tests/aggkit/internal-claims.bats || exit 1 @@ -406,14 +405,13 @@ if [ "$E2E_REPO_PATH" != "-" ]; then bats ./tests/aggkit/bridge-e2e-aggoracle-committee.bats || exit 1 ;; single-l2-network-op-pessimistic) - bats ./tests/aggkit/bridge-e2e.bats || exit 1 - bats ./tests/aggkit/e2e-pp.bats || exit 1 - bats ./tests/aggkit/bridge-sovereign-chain-e2e.bats || exit 1 - bats ./tests/op/optimistic-mode.bats || exit 1 - bats ./tests/aggkit/bridge-e2e-nightly.bats || exit 1 - bats ./tests/aggkit/internal-claims.bats || exit 1 - bats ./tests/aggkit/claim-reetrancy.bats || exit 1 - bats ./tests/aggkit/aggsender-committee-updates.bats || exit 1 + # bats ./tests/aggkit/bridge-e2e.bats || exit 1 + bats ./tests/aggkit/bridge-sovereign-chain-e2e.bats -f "Test backwardLET feature" || exit 1 + # bats ./tests/op/optimistic-mode.bats || exit 1 + # bats ./tests/aggkit/bridge-e2e-nightly.bats || exit 1 + # bats ./tests/aggkit/internal-claims.bats || exit 1 + # bats ./tests/aggkit/claim-reetrancy.bats || exit 1 + # bats 
./tests/aggkit/aggsender-committee-updates.bats || exit 1 ;; multi-l2-networks-2-chains-op-pessimistic) bats ./tests/aggkit/bridge-e2e-2-chains.bats || exit 1 From 78e7dbcebeb4c55c78b240de619cd91daba4280d Mon Sep 17 00:00:00 2001 From: Arpit Temani Date: Thu, 8 Jan 2026 13:23:12 +0530 Subject: [PATCH 56/73] update ref --- ...st_e2e_cdk_erigon_multi_chains_args_2.json | 4 ++-- ...st_e2e_cdk_erigon_multi_chains_args_3.json | 4 ++-- .github/test_e2e_op_args_base.json | 4 ++-- .github/test_e2e_op_args_chain_1.json | 2 +- .github/test_e2e_op_args_chain_2.json | 4 ++-- ..._op_succinct_aggoracle_committee_args.json | 2 +- .github/workflows/test-e2e.yml | 22 +++++++++---------- scripts/local_config_helper | 2 +- test/config/fep-config.toml.template | 2 +- test/config/pp-config.toml.template | 4 ++-- 10 files changed, 25 insertions(+), 25 deletions(-) diff --git a/.github/test_e2e_cdk_erigon_multi_chains_args_2.json b/.github/test_e2e_cdk_erigon_multi_chains_args_2.json index 28465726e..19e9a45f1 100644 --- a/.github/test_e2e_cdk_erigon_multi_chains_args_2.json +++ b/.github/test_e2e_cdk_erigon_multi_chains_args_2.json @@ -5,7 +5,7 @@ }, "args": { "deployment_suffix": "-002", - "zkevm_rollup_chain_id": 20202, - "zkevm_rollup_id": 2 + "l2_chain_id": 20202, + "l2_network_id": 2 } } diff --git a/.github/test_e2e_cdk_erigon_multi_chains_args_3.json b/.github/test_e2e_cdk_erigon_multi_chains_args_3.json index 9057b049b..dffb0443a 100644 --- a/.github/test_e2e_cdk_erigon_multi_chains_args_3.json +++ b/.github/test_e2e_cdk_erigon_multi_chains_args_3.json @@ -5,7 +5,7 @@ }, "args": { "deployment_suffix": "-003", - "zkevm_rollup_chain_id": 20203, - "zkevm_rollup_id": 3 + "l2_chain_id": 20203, + "l2_network_id": 3 } } diff --git a/.github/test_e2e_op_args_base.json b/.github/test_e2e_op_args_base.json index 2b15a539e..7ca02d094 100644 --- a/.github/test_e2e_op_args_base.json +++ b/.github/test_e2e_op_args_base.json @@ -13,7 +13,7 @@ "additional_services": [], "binary_name": "aggkit", 
"aggkit_components": "aggsender,aggoracle", - "zkevm_rollup_chain_id": 20201, - "zkevm_rollup_id": 1 + "l2_chain_id": 20201, + "l2_network_id": 1 } } diff --git a/.github/test_e2e_op_args_chain_1.json b/.github/test_e2e_op_args_chain_1.json index e0207926b..acca8977e 100644 --- a/.github/test_e2e_op_args_chain_1.json +++ b/.github/test_e2e_op_args_chain_1.json @@ -1,6 +1,6 @@ { "args": { - "zkevm_rollup_chain_id": 20201 + "l2_chain_id": 20201 }, "optimism_package": { "predeployed_contracts": true, diff --git a/.github/test_e2e_op_args_chain_2.json b/.github/test_e2e_op_args_chain_2.json index bd441ad87..a4e0fbb64 100644 --- a/.github/test_e2e_op_args_chain_2.json +++ b/.github/test_e2e_op_args_chain_2.json @@ -5,8 +5,8 @@ }, "args": { "deployment_suffix": "-002", - "zkevm_rollup_chain_id": 20202, - "zkevm_rollup_id": 2 + "l2_chain_id": 20202, + "l2_network_id": 2 }, "optimism_package": { "chains": { diff --git a/.github/test_e2e_single_chain_op_succinct_aggoracle_committee_args.json b/.github/test_e2e_single_chain_op_succinct_aggoracle_committee_args.json index 066189e08..048e037db 100644 --- a/.github/test_e2e_single_chain_op_succinct_aggoracle_committee_args.json +++ b/.github/test_e2e_single_chain_op_succinct_aggoracle_committee_args.json @@ -4,7 +4,7 @@ "use_agg_oracle_committee": true, "agg_oracle_committee_quorum": 2, "agg_oracle_committee_total_members": 3, - "zkevm_rollup_chain_id": 20201 + "l2_chain_id": 20201 }, "optimism_package": { "chains": { diff --git a/.github/workflows/test-e2e.yml b/.github/workflows/test-e2e.yml index 48903062a..c8636853a 100644 --- a/.github/workflows/test-e2e.yml +++ b/.github/workflows/test-e2e.yml @@ -18,7 +18,7 @@ concurrency: env: AGGKIT_REPORT_CHANNEL: "C092K6Z0EUT" - KURTOSIS_CDK_COMMIT: "51368c55832e21be2bb6f241e22cdbb0e6bc99fe" + KURTOSIS_CDK_COMMIT: "507e4e8e6581ab5ecbb44444f9b7f1d05e3dcd1c" permissions: packages: write @@ -173,11 +173,11 @@ jobs: - build-tools - read-aggkit-args - get-kurtosis-cdk-commit - uses: 
agglayer/e2e/.github/workflows/aggkit-e2e-single-chain.yml@1d7411eb2fd48c09e24691b5081c9997e902936a + uses: agglayer/e2e/.github/workflows/aggkit-e2e-single-chain.yml@3b29112462a5dc855d6e9900609b95c592e00a46 secrets: inherit with: kurtosis-cdk-ref: ${{ needs.get-kurtosis-cdk-commit.outputs.kurtosis-commit }} - agglayer-e2e-ref: 1d7411eb2fd48c09e24691b5081c9997e902936a + agglayer-e2e-ref: 3b29112462a5dc855d6e9900609b95c592e00a46 kurtosis-cdk-enclave-name: op kurtosis-cdk-args: ${{ needs.read-aggkit-args.outputs.kurtosis-cdk-args-single-op-pessimistic }} test-name: "test-single-l2-network-op-pessimistic" @@ -201,11 +201,11 @@ jobs: - build-tools - read-aggkit-args - get-kurtosis-cdk-commit - uses: agglayer/e2e/.github/workflows/aggkit-e2e-single-chain.yml@1d7411eb2fd48c09e24691b5081c9997e902936a + uses: agglayer/e2e/.github/workflows/aggkit-e2e-single-chain.yml@3b29112462a5dc855d6e9900609b95c592e00a46 secrets: inherit with: kurtosis-cdk-ref: ${{ needs.get-kurtosis-cdk-commit.outputs.kurtosis-commit }} - agglayer-e2e-ref: 1d7411eb2fd48c09e24691b5081c9997e902936a + agglayer-e2e-ref: 3b29112462a5dc855d6e9900609b95c592e00a46 kurtosis-cdk-enclave-name: op kurtosis-cdk-args: ${{ needs.read-aggkit-args.outputs.kurtosis-cdk-args-single-op-succinct }} aggsender-find-imported-bridge-artifact: aggsender_find_imported_bridge @@ -229,12 +229,12 @@ jobs: - build-tools - read-aggkit-args - get-kurtosis-cdk-commit - uses: agglayer/e2e/.github/workflows/aggkit-e2e-single-chain.yml@1d7411eb2fd48c09e24691b5081c9997e902936a + uses: agglayer/e2e/.github/workflows/aggkit-e2e-single-chain.yml@3b29112462a5dc855d6e9900609b95c592e00a46 if: always() && github.event_name == 'schedule' && github.ref == 'refs/heads/develop' secrets: inherit with: kurtosis-cdk-ref: ${{ needs.get-kurtosis-cdk-commit.outputs.kurtosis-commit }} - agglayer-e2e-ref: 1d7411eb2fd48c09e24691b5081c9997e902936a + agglayer-e2e-ref: 3b29112462a5dc855d6e9900609b95c592e00a46 kurtosis-cdk-enclave-name: op kurtosis-cdk-args: ${{ 
needs.read-aggkit-args.outputs.kurtosis-cdk-args-single-op-succinct-aggoracle-committee }} test-name: "test-single-l2-network-op-succinct-aggoracle-committee" @@ -258,11 +258,11 @@ jobs: - build-tools - read-aggkit-args - get-kurtosis-cdk-commit - uses: agglayer/e2e/.github/workflows/aggkit-e2e-multi-chains.yml@1d7411eb2fd48c09e24691b5081c9997e902936a + uses: agglayer/e2e/.github/workflows/aggkit-e2e-multi-chains.yml@3b29112462a5dc855d6e9900609b95c592e00a46 secrets: inherit with: kurtosis-cdk-ref: ${{ needs.get-kurtosis-cdk-commit.outputs.kurtosis-commit }} - agglayer-e2e-ref: 1d7411eb2fd48c09e24691b5081c9997e902936a + agglayer-e2e-ref: 3b29112462a5dc855d6e9900609b95c592e00a46 kurtosis-cdk-enclave-name: op aggsender-find-imported-bridge-artifact: aggsender_find_imported_bridge kurtosis-cdk-args-1: ${{ needs.read-aggkit-args.outputs.kurtosis-cdk-args-1 }} @@ -284,11 +284,11 @@ jobs: - build-tools - read-aggkit-args - get-kurtosis-cdk-commit - uses: agglayer/e2e/.github/workflows/aggkit-e2e-multi-chains.yml@1d7411eb2fd48c09e24691b5081c9997e902936a + uses: agglayer/e2e/.github/workflows/aggkit-e2e-multi-chains.yml@3b29112462a5dc855d6e9900609b95c592e00a46 secrets: inherit with: kurtosis-cdk-ref: ${{ needs.get-kurtosis-cdk-commit.outputs.kurtosis-commit }} - agglayer-e2e-ref: 1d7411eb2fd48c09e24691b5081c9997e902936a + agglayer-e2e-ref: 3b29112462a5dc855d6e9900609b95c592e00a46 kurtosis-cdk-enclave-name: aggkit aggsender-find-imported-bridge-artifact: aggsender_find_imported_bridge kurtosis-cdk-args-1: ${{ needs.read-aggkit-args.outputs.kurtosis-cdk-args-3 }} diff --git a/scripts/local_config_helper b/scripts/local_config_helper index 858605273..2aa4ea62c 100644 --- a/scripts/local_config_helper +++ b/scripts/local_config_helper @@ -257,7 +257,7 @@ function common_export_values_of_aggkit_config() { export_key_from_toml_file_or_fatal pol_token_address $AGGKIT_CONFIG_FILE L1Config polTokenAddress export_key_from_toml_file_or_fatal zkevm_rollup_manager_address 
$AGGKIT_CONFIG_FILE L1Config polygonRollupManagerAddress export_key_from_toml_file_or_fatal zkevm_global_exit_root_address $AGGKIT_CONFIG_FILE L1Config polygonZkEVMGlobalExitRootAddress - export_key_from_toml_file_or_fatal zkevm_rollup_chain_id $AGGKIT_CONFIG_FILE AggOracle.EVMSender.EthTxManager.Etherman L1ChainID + export_key_from_toml_file_or_fatal l2_chain_id $AGGKIT_CONFIG_FILE AggOracle.EVMSender.EthTxManager.Etherman L1ChainID export_key_from_toml_file_or_fatal zkevm_bridge_address $AGGKIT_CONFIG_FILE L1Config BridgeAddr export_key_from_toml_file_or_fatal zkevm_bridge_l2_address $AGGKIT_CONFIG_FILE L2Config BridgeAddr } diff --git a/test/config/fep-config.toml.template b/test/config/fep-config.toml.template index b08839fd9..9ca6628ee 100644 --- a/test/config/fep-config.toml.template +++ b/test/config/fep-config.toml.template @@ -323,7 +323,7 @@ PrivateKeys = [{Path = "{{.zkevm_aggoracle_privatekey_path}}", Password = "{{.zk # ------------------------------------------------------------------------------ # Needs to be set to be the sovereign L2 chain id # ------------------------------------------------------------------------------ -L1ChainID = "{{.zkevm_rollup_chain_id}}" +L1ChainID = "{{.l2_chain_id}}" # ============================================================================== # ____ ____ ___ ____ ____ _____ _ ____ ______ ___ _ ____ diff --git a/test/config/pp-config.toml.template b/test/config/pp-config.toml.template index a10f095b5..43546961a 100644 --- a/test/config/pp-config.toml.template +++ b/test/config/pp-config.toml.template @@ -371,7 +371,7 @@ PrivateKeys = [{Path = "{{.zkevm_aggoracle_privatekey_path}}", Password = "{{.zk # ------------------------------------------------------------------------------ # Needs to be set to be the sovereign L2 chain id # ------------------------------------------------------------------------------ -L1ChainID = "{{.zkevm_rollup_chain_id}}" +L1ChainID = "{{.l2_chain_id}}" # 
============================================================================== # ____ ____ ___ ____ ____ _____ _ ____ ______ ___ _ ____ @@ -494,7 +494,7 @@ BridgeAddrL2 = "{{.zkevm_bridge_l2_address}}" # Needs to be set to be the sovereign L2 chain id # If set to 0, the chain ID will be fetched from the RPC endpoint # ------------------------------------------------------------------------------ -L1ChainID = "{{.zkevm_rollup_chain_id}}" +L1ChainID = "{{.l2_chain_id}}" # ============================================================================== # ____ __ _ _ _ From d45d26af6642544ddcd29bb71ab587c15443f7da Mon Sep 17 00:00:00 2001 From: Arpit Temani Date: Thu, 8 Jan 2026 13:29:34 +0530 Subject: [PATCH 57/73] update config --- .github/test_e2e_cdk_erigon_args_base.json | 2 +- .github/test_e2e_op_args_base.json | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/test_e2e_cdk_erigon_args_base.json b/.github/test_e2e_cdk_erigon_args_base.json index dbf54fc94..25dced5d3 100644 --- a/.github/test_e2e_cdk_erigon_args_base.json +++ b/.github/test_e2e_cdk_erigon_args_base.json @@ -5,7 +5,7 @@ "deploy_cdk_bridge_infra": false }, "args": { - "consensus_contract_type": "ecdsa_multisig", + "consensus_contract_type": "ecdsa-multisig", "use_agg_sender_validator": false, "agg_sender_multisig_threshold": 1, "agg_sender_validator_total_number": 0, diff --git a/.github/test_e2e_op_args_base.json b/.github/test_e2e_op_args_base.json index 7ca02d094..d82314258 100644 --- a/.github/test_e2e_op_args_base.json +++ b/.github/test_e2e_op_args_base.json @@ -5,7 +5,7 @@ }, "args": { "aggkit_image": "aggkit:local", - "consensus_contract_type": "ecdsa_multisig", + "consensus_contract_type": "ecdsa-multisig", "use_agg_sender_validator": true, "agg_sender_multisig_threshold": 2, "agg_sender_validator_total_number": 3, From 49c68b3c7f897fce9d8f07c9de08fc3cd253eb93 Mon Sep 17 00:00:00 2001 From: Arpit Temani Date: Thu, 8 Jan 2026 15:12:42 +0530 Subject: [PATCH 58/73] fix 
--- test/run-local-e2e.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test/run-local-e2e.sh b/test/run-local-e2e.sh index 0d1979df2..495dc9a69 100755 --- a/test/run-local-e2e.sh +++ b/test/run-local-e2e.sh @@ -168,10 +168,10 @@ single-l2-network-op-succinct-aggoracle-committee) ENCLAVE_NAME="op" ;; single-l2-network-op-pessimistic) - ENCLAVE_NAME="aggkit" + ENCLAVE_NAME="op" ;; multi-l2-networks-2-chains-op-pessimistic) - ENCLAVE_NAME="aggkit" + ENCLAVE_NAME="op" ;; multi-l2-networks-3-chains-cdk-erigon-pessimistic) ENCLAVE_NAME="aggkit" From 91020126f13b71a0b00eb7e5797ab6d8749d7b66 Mon Sep 17 00:00:00 2001 From: Arpit Temani Date: Thu, 8 Jan 2026 17:41:02 +0530 Subject: [PATCH 59/73] fix err related to deposit count --- bridgesync/processor.go | 8 +++++--- scripts/local_config_helper | 40 +++++++++++++++++++++++++++++++++---- test/run-local-e2e.sh | 2 +- 3 files changed, 42 insertions(+), 8 deletions(-) diff --git a/bridgesync/processor.go b/bridgesync/processor.go index fe1cb8412..2380410ed 100644 --- a/bridgesync/processor.go +++ b/bridgesync/processor.go @@ -1405,12 +1405,12 @@ func (p *processor) restoreBackwardLETBridges(tx dbtypes.Txer, backwardLETs []*B ` for _, backwardLET := range backwardLETs { - prev, err := aggkitcommon.SafeUint64(backwardLET.PreviousDepositCount) + prev, err := aggkitcommon.SafeUint64(new(big.Int).Sub(backwardLET.PreviousDepositCount, big.NewInt(1))) if err != nil { return fmt.Errorf("invalid previous deposit count: %w", err) } - next, err := aggkitcommon.SafeUint64(backwardLET.NewDepositCount) + next, err := aggkitcommon.SafeUint64(new(big.Int).Sub(backwardLET.NewDepositCount, big.NewInt(1))) if err != nil { return fmt.Errorf("invalid new deposit count: %w", err) } @@ -1590,7 +1590,7 @@ func (p *processor) ProcessBlock(ctx context.Context, block sync.Block) error { return err } - newDepositCount, leafIndex, err := normalizeDepositCount(event.BackwardLET.NewDepositCount) + newDepositCount, leafIndex, err := 
normalizeDepositCount(new(big.Int).Sub(event.BackwardLET.NewDepositCount, big.NewInt(1))) if err != nil { return err } @@ -1610,6 +1610,8 @@ func (p *processor) ProcessBlock(ctx context.Context, block sync.Block) error { return err } + fmt.Printf("backward let event %s\n", event.BackwardLET.String()) + // 4. sanity check that the new root matches the latest one in the exit tree if err := p.sanityCheckLatestLER(tx, event.BackwardLET.NewRoot); err != nil { p.log.Errorf("failed to sanity check LER after processing BackwardLET: %v", err) diff --git a/scripts/local_config_helper b/scripts/local_config_helper index 2aa4ea62c..7038455fe 100644 --- a/scripts/local_config_helper +++ b/scripts/local_config_helper @@ -236,14 +236,46 @@ function create_dest_folder() { } ############################################################################### function download_kurtosis_artifacts() { - kurtosis files download $KURTOSIS_ENCLAVE aggkit-sequencer-keystore $DEST + # Construct artifact name from service name (e.g., aggkit-001 -> aggkit-config-001) + # Allow override via environment variable, otherwise construct from AGGKIT_SERVICE + local AGGKIT_CONFIG_ARTIFACT + if [ -n "${KURTOSIS_ARTIFACT_AGGKIT_CONFIG:-}" ]; then + AGGKIT_CONFIG_ARTIFACT="$KURTOSIS_ARTIFACT_AGGKIT_CONFIG" + else + # Extract service number from AGGKIT_SERVICE (e.g., aggkit-001 -> 001) + local SERVICE_NUMBER=$(echo "$AGGKIT_SERVICE" | sed 's/.*-//') + AGGKIT_CONFIG_ARTIFACT="aggkit-config-${SERVICE_NUMBER}" + fi + + kurtosis files download "$KURTOSIS_ENCLAVE" "aggkit-sequencer-keystore" "$DEST" ok_or_fatal "Error downloading kurtosis artifact aggkit-sequencer-keystore to $DEST" export zkevm_l2_sequencer_keystore_file_path=$DEST/sequencer.keystore - kurtosis files download $KURTOSIS_ENCLAVE aggkit-config-artifact $DEST - ok_or_fatal "Error downloading kurtosis artifact aggkit-config-artifact to $DEST" + if ! 
kurtosis files download "$KURTOSIS_ENCLAVE" "$AGGKIT_CONFIG_ARTIFACT" "$DEST" 2>/dev/null; then + log_error "Failed to download kurtosis artifact '$AGGKIT_CONFIG_ARTIFACT' from enclave '$KURTOSIS_ENCLAVE'" + log_error "This artifact is expected to be created by the Kurtosis CDK setup." + log_error "" + log_error "Current configuration:" + log_error " AGGKIT_SERVICE: $AGGKIT_SERVICE" + log_error " Constructed artifact name: $AGGKIT_CONFIG_ARTIFACT" + log_error "" + log_error "Possible solutions:" + log_error "1. Ensure the Kurtosis environment was started correctly" + log_error "2. Check that the Kurtosis CDK version matches the expected commit" + log_error "3. Verify the enclave name is correct (current: $KURTOSIS_ENCLAVE)" + log_error "4. Try restarting the Kurtosis environment:" + log_error " kurtosis clean --all" + log_error " kurtosis run --enclave $KURTOSIS_ENCLAVE --args-file params.yml --image-download always ." + log_error "" + log_error "You can override the artifact name by setting:" + log_error " export KURTOSIS_ARTIFACT_AGGKIT_CONFIG=" + log_error "" + log_error "You can check available artifacts with:" + log_error " kurtosis enclave inspect $KURTOSIS_ENCLAVE | grep aggkit" + log_fatal "Error downloading kurtosis artifact $AGGKIT_CONFIG_ARTIFACT to $DEST" + fi - kurtosis files download $KURTOSIS_ENCLAVE aggoracle-keystore $DEST + kurtosis files download "$KURTOSIS_ENCLAVE" "aggoracle-keystore" "$DEST" ok_or_fatal "Error downloading kurtosis artifact aggoracle to $DEST" export zkevm_aggoracle_privatekey_path=$DEST/aggoracle.keystore } diff --git a/test/run-local-e2e.sh b/test/run-local-e2e.sh index 495dc9a69..2bd6e0a9c 100755 --- a/test/run-local-e2e.sh +++ b/test/run-local-e2e.sh @@ -406,7 +406,7 @@ if [ "$E2E_REPO_PATH" != "-" ]; then ;; single-l2-network-op-pessimistic) # bats ./tests/aggkit/bridge-e2e.bats || exit 1 - bats ./tests/aggkit/bridge-sovereign-chain-e2e.bats -f "Test backwardLET feature" || exit 1 + bats 
./tests/aggkit/bridge-sovereign-chain-e2e.bats -f "Test backwardlet, forwardlet feature" || exit 1 # bats ./tests/op/optimistic-mode.bats || exit 1 # bats ./tests/aggkit/bridge-e2e-nightly.bats || exit 1 # bats ./tests/aggkit/internal-claims.bats || exit 1 From 933f3cc9dee37d20f6639cc2de558182802f4496 Mon Sep 17 00:00:00 2001 From: Arpit Temani Date: Thu, 8 Jan 2026 19:37:38 +0530 Subject: [PATCH 60/73] update ref --- .github/workflows/test-e2e.yml | 20 ++++++++++---------- bridgesync/processor.go | 2 +- 2 files changed, 11 insertions(+), 11 deletions(-) diff --git a/.github/workflows/test-e2e.yml b/.github/workflows/test-e2e.yml index c8636853a..efe8e29d3 100644 --- a/.github/workflows/test-e2e.yml +++ b/.github/workflows/test-e2e.yml @@ -173,11 +173,11 @@ jobs: - build-tools - read-aggkit-args - get-kurtosis-cdk-commit - uses: agglayer/e2e/.github/workflows/aggkit-e2e-single-chain.yml@3b29112462a5dc855d6e9900609b95c592e00a46 + uses: agglayer/e2e/.github/workflows/aggkit-e2e-single-chain.yml@64cedb5666ba92e9fb7b4c0a6798a18859ee3a72 secrets: inherit with: kurtosis-cdk-ref: ${{ needs.get-kurtosis-cdk-commit.outputs.kurtosis-commit }} - agglayer-e2e-ref: 3b29112462a5dc855d6e9900609b95c592e00a46 + agglayer-e2e-ref: 64cedb5666ba92e9fb7b4c0a6798a18859ee3a72 kurtosis-cdk-enclave-name: op kurtosis-cdk-args: ${{ needs.read-aggkit-args.outputs.kurtosis-cdk-args-single-op-pessimistic }} test-name: "test-single-l2-network-op-pessimistic" @@ -201,11 +201,11 @@ jobs: - build-tools - read-aggkit-args - get-kurtosis-cdk-commit - uses: agglayer/e2e/.github/workflows/aggkit-e2e-single-chain.yml@3b29112462a5dc855d6e9900609b95c592e00a46 + uses: agglayer/e2e/.github/workflows/aggkit-e2e-single-chain.yml@64cedb5666ba92e9fb7b4c0a6798a18859ee3a72 secrets: inherit with: kurtosis-cdk-ref: ${{ needs.get-kurtosis-cdk-commit.outputs.kurtosis-commit }} - agglayer-e2e-ref: 3b29112462a5dc855d6e9900609b95c592e00a46 + agglayer-e2e-ref: 64cedb5666ba92e9fb7b4c0a6798a18859ee3a72 
kurtosis-cdk-enclave-name: op kurtosis-cdk-args: ${{ needs.read-aggkit-args.outputs.kurtosis-cdk-args-single-op-succinct }} aggsender-find-imported-bridge-artifact: aggsender_find_imported_bridge @@ -229,12 +229,12 @@ jobs: - build-tools - read-aggkit-args - get-kurtosis-cdk-commit - uses: agglayer/e2e/.github/workflows/aggkit-e2e-single-chain.yml@3b29112462a5dc855d6e9900609b95c592e00a46 + uses: agglayer/e2e/.github/workflows/aggkit-e2e-single-chain.yml@64cedb5666ba92e9fb7b4c0a6798a18859ee3a72 if: always() && github.event_name == 'schedule' && github.ref == 'refs/heads/develop' secrets: inherit with: kurtosis-cdk-ref: ${{ needs.get-kurtosis-cdk-commit.outputs.kurtosis-commit }} - agglayer-e2e-ref: 3b29112462a5dc855d6e9900609b95c592e00a46 + agglayer-e2e-ref: 64cedb5666ba92e9fb7b4c0a6798a18859ee3a72 kurtosis-cdk-enclave-name: op kurtosis-cdk-args: ${{ needs.read-aggkit-args.outputs.kurtosis-cdk-args-single-op-succinct-aggoracle-committee }} test-name: "test-single-l2-network-op-succinct-aggoracle-committee" @@ -258,11 +258,11 @@ jobs: - build-tools - read-aggkit-args - get-kurtosis-cdk-commit - uses: agglayer/e2e/.github/workflows/aggkit-e2e-multi-chains.yml@3b29112462a5dc855d6e9900609b95c592e00a46 + uses: agglayer/e2e/.github/workflows/aggkit-e2e-multi-chains.yml@64cedb5666ba92e9fb7b4c0a6798a18859ee3a72 secrets: inherit with: kurtosis-cdk-ref: ${{ needs.get-kurtosis-cdk-commit.outputs.kurtosis-commit }} - agglayer-e2e-ref: 3b29112462a5dc855d6e9900609b95c592e00a46 + agglayer-e2e-ref: 64cedb5666ba92e9fb7b4c0a6798a18859ee3a72 kurtosis-cdk-enclave-name: op aggsender-find-imported-bridge-artifact: aggsender_find_imported_bridge kurtosis-cdk-args-1: ${{ needs.read-aggkit-args.outputs.kurtosis-cdk-args-1 }} @@ -284,11 +284,11 @@ jobs: - build-tools - read-aggkit-args - get-kurtosis-cdk-commit - uses: agglayer/e2e/.github/workflows/aggkit-e2e-multi-chains.yml@3b29112462a5dc855d6e9900609b95c592e00a46 + uses: 
agglayer/e2e/.github/workflows/aggkit-e2e-multi-chains.yml@64cedb5666ba92e9fb7b4c0a6798a18859ee3a72 secrets: inherit with: kurtosis-cdk-ref: ${{ needs.get-kurtosis-cdk-commit.outputs.kurtosis-commit }} - agglayer-e2e-ref: 3b29112462a5dc855d6e9900609b95c592e00a46 + agglayer-e2e-ref: 64cedb5666ba92e9fb7b4c0a6798a18859ee3a72 kurtosis-cdk-enclave-name: aggkit aggsender-find-imported-bridge-artifact: aggsender_find_imported_bridge kurtosis-cdk-args-1: ${{ needs.read-aggkit-args.outputs.kurtosis-cdk-args-3 }} diff --git a/bridgesync/processor.go b/bridgesync/processor.go index 2380410ed..fc832c541 100644 --- a/bridgesync/processor.go +++ b/bridgesync/processor.go @@ -1756,7 +1756,7 @@ func (p *processor) handleForwardLETEvent(tx dbtypes.Txer, event *ForwardLET, bl return 0, fmt.Errorf("failed to decode new leaves in forward LET: %w", err) } - newDepositCount := uint32(event.PreviousDepositCount.Uint64()) + 1 + newDepositCount := uint32(event.PreviousDepositCount.Uint64()) newBlockPos := event.BlockPos if blockPos != nil { newBlockPos = *blockPos From 82b9c89c70a97fe7c06c0a452d4188839d0cf7c9 Mon Sep 17 00:00:00 2001 From: Arpit Temani Date: Thu, 8 Jan 2026 21:17:22 +0530 Subject: [PATCH 61/73] update tests --- bridgesync/processor.go | 5 ++-- bridgesync/processor_test.go | 52 ++++++++++++++++++------------------ 2 files changed, 28 insertions(+), 29 deletions(-) diff --git a/bridgesync/processor.go b/bridgesync/processor.go index fc832c541..8c795fc97 100644 --- a/bridgesync/processor.go +++ b/bridgesync/processor.go @@ -1590,7 +1590,8 @@ func (p *processor) ProcessBlock(ctx context.Context, block sync.Block) error { return err } - newDepositCount, leafIndex, err := normalizeDepositCount(new(big.Int).Sub(event.BackwardLET.NewDepositCount, big.NewInt(1))) + adjustedCount := new(big.Int).Sub(event.BackwardLET.NewDepositCount, big.NewInt(1)) + newDepositCount, leafIndex, err := normalizeDepositCount(adjustedCount) if err != nil { return err } @@ -1610,8 +1611,6 @@ func (p 
*processor) ProcessBlock(ctx context.Context, block sync.Block) error { return err } - fmt.Printf("backward let event %s\n", event.BackwardLET.String()) - // 4. sanity check that the new root matches the latest one in the exit tree if err := p.sanityCheckLatestLER(tx, event.BackwardLET.NewRoot); err != nil { p.log.Errorf("failed to sanity check LER after processing BackwardLET: %v", err) diff --git a/bridgesync/processor_test.go b/bridgesync/processor_test.go index 7223b5ac3..289f12139 100644 --- a/bridgesync/processor_test.go +++ b/bridgesync/processor_test.go @@ -487,8 +487,8 @@ var ( Event{BackwardLET: &BackwardLET{ BlockNum: 5, BlockPos: 4, - PreviousDepositCount: big.NewInt(3), - NewDepositCount: big.NewInt(2), + PreviousDepositCount: big.NewInt(4), + NewDepositCount: big.NewInt(3), PreviousRoot: common.HexToHash("0x15cd4b94cacc2cf50d055e1adb5fbfe5cd95485e121a5c411d73e263f2a66685"), NewRoot: common.HexToHash("0xa03113d9ce128863f29479689c82d0b37ebc9432c569c3a57f22d6c008256c5b"), }}, @@ -5447,8 +5447,8 @@ func TestProcessor_BackwardLET(t *testing.T) { Event{BackwardLET: &BackwardLET{ BlockNum: uint64(len(blocks) + 1), BlockPos: 0, - PreviousDepositCount: big.NewInt(3), - NewDepositCount: big.NewInt(2), + PreviousDepositCount: big.NewInt(4), + NewDepositCount: big.NewInt(3), PreviousRoot: common.HexToHash("0x9ba667158a062be548e5c1b2e8a9a2ad03b693e562535b0723880627c6664b02"), NewRoot: common.HexToHash("0xa9d31ebbb97c7cd7c7103bee8af7d0b4c83771939baba0b415b0f94c4c39fd84"), }}, @@ -5471,8 +5471,8 @@ func TestProcessor_BackwardLET(t *testing.T) { Event{BackwardLET: &BackwardLET{ BlockNum: uint64(len(blocks) + 1), BlockPos: 0, - PreviousDepositCount: big.NewInt(5), - NewDepositCount: big.NewInt(0), + PreviousDepositCount: big.NewInt(6), + NewDepositCount: big.NewInt(1), PreviousRoot: common.HexToHash("0x9ba667158a062be548e5c1b2e8a9a2ad03b693e562535b0723880627c6664b02"), NewRoot: common.HexToHash("0x283c52c3d10a22d01f95f5bcab5e823675c9855bd40b1e82f32b0437b3b6a446"), }}, 
@@ -5495,8 +5495,8 @@ func TestProcessor_BackwardLET(t *testing.T) { Event{BackwardLET: &BackwardLET{ BlockNum: uint64(len(blocks) + 1), BlockPos: 0, - PreviousDepositCount: big.NewInt(5), - NewDepositCount: big.NewInt(4), + PreviousDepositCount: big.NewInt(6), + NewDepositCount: big.NewInt(5), PreviousRoot: common.HexToHash("0x9ba667158a062be548e5c1b2e8a9a2ad03b693e562535b0723880627c6664b02"), NewRoot: common.HexToHash("0x44e1bf8449ecec2b8b1d123fab00d33c9acb308e590605adf5f6e2de4d1c1133"), }}, @@ -5520,8 +5520,8 @@ func TestProcessor_BackwardLET(t *testing.T) { Event{BackwardLET: &BackwardLET{ BlockNum: uint64(len(blocks) + 1), BlockPos: 0, - PreviousDepositCount: big.NewInt(5), - NewDepositCount: big.NewInt(2), + PreviousDepositCount: big.NewInt(6), + NewDepositCount: big.NewInt(3), PreviousRoot: common.HexToHash("0x9ba667158a062be548e5c1b2e8a9a2ad03b693e562535b0723880627c6664b02"), NewRoot: common.HexToHash("0xa9d31ebbb97c7cd7c7103bee8af7d0b4c83771939baba0b415b0f94c4c39fd84"), }}, @@ -5547,8 +5547,8 @@ func TestProcessor_BackwardLET(t *testing.T) { Event{BackwardLET: &BackwardLET{ BlockNum: uint64(len(blocks) + 1), BlockPos: 0, - PreviousDepositCount: big.NewInt(5), - NewDepositCount: big.NewInt(3), + PreviousDepositCount: big.NewInt(6), + NewDepositCount: big.NewInt(4), PreviousRoot: common.HexToHash("0x9ba667158a062be548e5c1b2e8a9a2ad03b693e562535b0723880627c6664b02"), NewRoot: common.HexToHash("0x7533c9ef58edd0bea7959a20c33ed47e5548d35f4ff140c5c915740fe6800fb8"), }}, @@ -5585,8 +5585,8 @@ func TestProcessor_BackwardLET(t *testing.T) { Event{BackwardLET: &BackwardLET{ BlockNum: 1, BlockPos: 0, - PreviousDepositCount: big.NewInt(6), - NewDepositCount: big.NewInt(3), + PreviousDepositCount: big.NewInt(7), + NewDepositCount: big.NewInt(4), }}, }, }} @@ -5642,8 +5642,8 @@ func TestProcessor_BackwardLET(t *testing.T) { Event{BackwardLET: &BackwardLET{ BlockNum: 4, BlockPos: 0, - PreviousDepositCount: big.NewInt(5), - NewDepositCount: big.NewInt(2), + 
PreviousDepositCount: big.NewInt(6), + NewDepositCount: big.NewInt(3), PreviousRoot: common.HexToHash("0x9ba667158a062be548e5c1b2e8a9a2ad03b693e562535b0723880627c6664b02"), NewRoot: common.HexToHash("0xa9d31ebbb97c7cd7c7103bee8af7d0b4c83771939baba0b415b0f94c4c39fd84"), }}, @@ -5668,8 +5668,8 @@ func TestProcessor_BackwardLET(t *testing.T) { Event{BackwardLET: &BackwardLET{ BlockNum: uint64(len(blocks) + 1), BlockPos: 0, - PreviousDepositCount: big.NewInt(5), - NewDepositCount: big.NewInt(2), + PreviousDepositCount: big.NewInt(6), + NewDepositCount: big.NewInt(3), PreviousRoot: common.HexToHash("0x9ba667158a062be548e5c1b2e8a9a2ad03b693e562535b0723880627c6664b02"), NewRoot: common.HexToHash("0xa9d31ebbb97c7cd7c7103bee8af7d0b4c83771939baba0b415b0f94c4c39fd84"), }}, @@ -5888,7 +5888,7 @@ func TestHandleForwardLETEvent(t *testing.T) { BlockPos: 5, BlockTimestamp: 1234567890, TxnHash: common.HexToHash("0xabc123"), - PreviousDepositCount: big.NewInt(int64(initialDepositCount)), + PreviousDepositCount: big.NewInt(int64(initialDepositCount + 1)), PreviousRoot: initialRoot, NewDepositCount: big.NewInt(int64(initialDepositCount + 1)), NewLeaves: encodedLeaves, @@ -5993,7 +5993,7 @@ func TestHandleForwardLETEvent(t *testing.T) { BlockPos: 10, BlockTimestamp: 1234567900, TxnHash: common.HexToHash("0xdef456"), - PreviousDepositCount: big.NewInt(int64(initialDepositCount)), + PreviousDepositCount: big.NewInt(int64(initialDepositCount + 1)), PreviousRoot: initialRoot, NewDepositCount: big.NewInt(int64(initialDepositCount + uint32(len(leaves)))), NewLeaves: encodedLeaves, @@ -6092,7 +6092,7 @@ func TestHandleForwardLETEvent(t *testing.T) { BlockPos: 20, BlockTimestamp: 1234567950, TxnHash: common.HexToHash("0xforward789"), - PreviousDepositCount: big.NewInt(int64(initialDepositCount)), + PreviousDepositCount: big.NewInt(int64(initialDepositCount + 1)), PreviousRoot: initialRoot, NewDepositCount: big.NewInt(int64(initialDepositCount + 1)), NewLeaves: encodedLeaves, @@ -6201,7 
+6201,7 @@ func TestHandleForwardLETEvent(t *testing.T) { BlockPos: 30, BlockTimestamp: 1234567999, TxnHash: common.HexToHash("0xforward999"), - PreviousDepositCount: big.NewInt(int64(initialDepositCount)), + PreviousDepositCount: big.NewInt(int64(initialDepositCount + 1)), PreviousRoot: initialRoot, NewDepositCount: big.NewInt(int64(initialDepositCount + 1)), NewLeaves: encodedLeaves, @@ -6262,7 +6262,7 @@ func TestHandleForwardLETEvent(t *testing.T) { BlockPos: 5, BlockTimestamp: 1234567890, TxnHash: common.HexToHash("0xabc123"), - PreviousDepositCount: big.NewInt(int64(initialDepositCount)), + PreviousDepositCount: big.NewInt(int64(initialDepositCount + 1)), PreviousRoot: common.HexToHash("0xWRONG"), // Wrong root NewDepositCount: big.NewInt(int64(initialDepositCount + 1)), NewRoot: common.HexToHash("0x999"), @@ -6319,7 +6319,7 @@ func TestHandleForwardLETEvent(t *testing.T) { BlockPos: 5, BlockTimestamp: 1234567890, TxnHash: common.HexToHash("0xabc123"), - PreviousDepositCount: big.NewInt(int64(initialDepositCount)), + PreviousDepositCount: big.NewInt(int64(initialDepositCount + 1)), PreviousRoot: initialRoot, NewDepositCount: big.NewInt(int64(initialDepositCount + 1)), NewRoot: common.HexToHash("0xWRONG"), // Wrong new root @@ -6352,7 +6352,7 @@ func TestHandleForwardLETEvent(t *testing.T) { BlockPos: 5, BlockTimestamp: 1234567890, TxnHash: common.HexToHash("0xabc123"), - PreviousDepositCount: big.NewInt(int64(initialDepositCount)), + PreviousDepositCount: big.NewInt(int64(initialDepositCount + 1)), PreviousRoot: initialRoot, NewDepositCount: big.NewInt(int64(initialDepositCount + 1)), NewRoot: common.Hash{}, @@ -6407,7 +6407,7 @@ func TestHandleForwardLETEvent(t *testing.T) { BlockPos: 5, BlockTimestamp: 1234567890, TxnHash: common.HexToHash("0xabc123"), - PreviousDepositCount: big.NewInt(int64(initialDepositCount)), + PreviousDepositCount: big.NewInt(int64(initialDepositCount + 1)), PreviousRoot: initialRoot, NewDepositCount: 
big.NewInt(int64(initialDepositCount + 1)), NewLeaves: encodedLeaves, From 65a195a0b14608d883544da34c5088697a05ef1b Mon Sep 17 00:00:00 2001 From: Arpit Temani Date: Thu, 8 Jan 2026 21:38:42 +0530 Subject: [PATCH 62/73] update tests --- bridgesync/processor_test.go | 77 ------------------------------------ 1 file changed, 77 deletions(-) diff --git a/bridgesync/processor_test.go b/bridgesync/processor_test.go index 289f12139..8adeb7eaf 100644 --- a/bridgesync/processor_test.go +++ b/bridgesync/processor_test.go @@ -5,7 +5,6 @@ import ( "encoding/json" "errors" "fmt" - "math" "math/big" "os" "path" @@ -5536,44 +5535,6 @@ func TestProcessor_BackwardLET(t *testing.T) { skipBlocks: []uint64{2, 3}, // all the bridges from these blocks were backwarded archivedDepositCounts: []uint32{3, 4, 5}, }, - { - name: "overlapping backward let events", - setupBlocks: func() []sync.Block { - blocks := buildBlocksWithSequentialBridges(3, 2, 0, 0) - blocks = append(blocks, sync.Block{ - Num: uint64(len(blocks) + 1), - Hash: common.HexToHash(fmt.Sprintf("0x%x", len(blocks)+1)), - Events: []any{ - Event{BackwardLET: &BackwardLET{ - BlockNum: uint64(len(blocks) + 1), - BlockPos: 0, - PreviousDepositCount: big.NewInt(6), - NewDepositCount: big.NewInt(4), - PreviousRoot: common.HexToHash("0x9ba667158a062be548e5c1b2e8a9a2ad03b693e562535b0723880627c6664b02"), - NewRoot: common.HexToHash("0x7533c9ef58edd0bea7959a20c33ed47e5548d35f4ff140c5c915740fe6800fb8"), - }}, - }, - }) - blocks = append(blocks, sync.Block{ - Num: uint64(len(blocks) + 2), - Hash: common.HexToHash(fmt.Sprintf("0x%x", len(blocks)+2)), - Events: []any{ - Event{BackwardLET: &BackwardLET{ - BlockNum: uint64(len(blocks) + 2), - BlockPos: 0, - PreviousDepositCount: big.NewInt(4), - NewDepositCount: big.NewInt(3), - PreviousRoot: common.HexToHash("0x7533c9ef58edd0bea7959a20c33ed47e5548d35f4ff140c5c915740fe6800fb8"), - NewRoot: common.HexToHash("0x7533c9ef58edd0bea7959a20c33ed47e5548d35f4ff140c5c915740fe6800fb8"), - }}, - }, - }) - 
- return blocks - }, - targetDepositCount: 3, - archivedDepositCounts: []uint32{4, 5}, - }, { name: "backward let on empty bridge table", setupBlocks: func() []sync.Block { @@ -5593,44 +5554,6 @@ func TestProcessor_BackwardLET(t *testing.T) { }, targetDepositCount: 0, }, - { - name: "backward let invalid new deposit count (outside of uint64 range)", - setupBlocks: func() []sync.Block { - return []sync.Block{ - { - Num: 1, - Hash: common.HexToHash("0x1"), - Events: []any{ - Event{BackwardLET: &BackwardLET{ - BlockNum: 1, - BlockPos: 0, - PreviousDepositCount: big.NewInt(0), - NewDepositCount: big.NewInt(-3), - }}, - }, - }} - }, - processBlockErrMsg: "invalid deposit count: value=-3 does not fit in uint64", - }, - { - name: "backward let invalid new deposit count (outside of uint32 range)", - setupBlocks: func() []sync.Block { - return []sync.Block{ - { - Num: 1, - Hash: common.HexToHash("0x1"), - Events: []any{ - Event{BackwardLET: &BackwardLET{ - BlockNum: 1, - BlockPos: 0, - PreviousDepositCount: big.NewInt(0), - NewDepositCount: new(big.Int).SetUint64(uint64(math.MaxUint32) + 1), - }}, - }, - }} - }, - processBlockErrMsg: "invalid deposit count: value=4294967296 exceeds uint32 max", - }, { name: "backward let after a couple of bridges + reorg backward let", setupBlocks: func() []sync.Block { From b7600ba96272d2d05da60b93164c470dd1be01ab Mon Sep 17 00:00:00 2001 From: Arpit Temani Date: Thu, 8 Jan 2026 22:22:24 +0530 Subject: [PATCH 63/73] update ref --- .github/workflows/test-e2e.yml | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/.github/workflows/test-e2e.yml b/.github/workflows/test-e2e.yml index efe8e29d3..98337e34c 100644 --- a/.github/workflows/test-e2e.yml +++ b/.github/workflows/test-e2e.yml @@ -173,11 +173,11 @@ jobs: - build-tools - read-aggkit-args - get-kurtosis-cdk-commit - uses: agglayer/e2e/.github/workflows/aggkit-e2e-single-chain.yml@64cedb5666ba92e9fb7b4c0a6798a18859ee3a72 + uses: 
agglayer/e2e/.github/workflows/aggkit-e2e-single-chain.yml@bc2ca8bca942a4dfc8ce11781dcfc590f2950a7e secrets: inherit with: kurtosis-cdk-ref: ${{ needs.get-kurtosis-cdk-commit.outputs.kurtosis-commit }} - agglayer-e2e-ref: 64cedb5666ba92e9fb7b4c0a6798a18859ee3a72 + agglayer-e2e-ref: bc2ca8bca942a4dfc8ce11781dcfc590f2950a7e kurtosis-cdk-enclave-name: op kurtosis-cdk-args: ${{ needs.read-aggkit-args.outputs.kurtosis-cdk-args-single-op-pessimistic }} test-name: "test-single-l2-network-op-pessimistic" @@ -201,11 +201,11 @@ jobs: - build-tools - read-aggkit-args - get-kurtosis-cdk-commit - uses: agglayer/e2e/.github/workflows/aggkit-e2e-single-chain.yml@64cedb5666ba92e9fb7b4c0a6798a18859ee3a72 + uses: agglayer/e2e/.github/workflows/aggkit-e2e-single-chain.yml@bc2ca8bca942a4dfc8ce11781dcfc590f2950a7e secrets: inherit with: kurtosis-cdk-ref: ${{ needs.get-kurtosis-cdk-commit.outputs.kurtosis-commit }} - agglayer-e2e-ref: 64cedb5666ba92e9fb7b4c0a6798a18859ee3a72 + agglayer-e2e-ref: bc2ca8bca942a4dfc8ce11781dcfc590f2950a7e kurtosis-cdk-enclave-name: op kurtosis-cdk-args: ${{ needs.read-aggkit-args.outputs.kurtosis-cdk-args-single-op-succinct }} aggsender-find-imported-bridge-artifact: aggsender_find_imported_bridge @@ -229,12 +229,12 @@ jobs: - build-tools - read-aggkit-args - get-kurtosis-cdk-commit - uses: agglayer/e2e/.github/workflows/aggkit-e2e-single-chain.yml@64cedb5666ba92e9fb7b4c0a6798a18859ee3a72 + uses: agglayer/e2e/.github/workflows/aggkit-e2e-single-chain.yml@bc2ca8bca942a4dfc8ce11781dcfc590f2950a7e if: always() && github.event_name == 'schedule' && github.ref == 'refs/heads/develop' secrets: inherit with: kurtosis-cdk-ref: ${{ needs.get-kurtosis-cdk-commit.outputs.kurtosis-commit }} - agglayer-e2e-ref: 64cedb5666ba92e9fb7b4c0a6798a18859ee3a72 + agglayer-e2e-ref: bc2ca8bca942a4dfc8ce11781dcfc590f2950a7e kurtosis-cdk-enclave-name: op kurtosis-cdk-args: ${{ needs.read-aggkit-args.outputs.kurtosis-cdk-args-single-op-succinct-aggoracle-committee }} test-name: 
"test-single-l2-network-op-succinct-aggoracle-committee" @@ -258,11 +258,11 @@ jobs: - build-tools - read-aggkit-args - get-kurtosis-cdk-commit - uses: agglayer/e2e/.github/workflows/aggkit-e2e-multi-chains.yml@64cedb5666ba92e9fb7b4c0a6798a18859ee3a72 + uses: agglayer/e2e/.github/workflows/aggkit-e2e-multi-chains.yml@bc2ca8bca942a4dfc8ce11781dcfc590f2950a7e secrets: inherit with: kurtosis-cdk-ref: ${{ needs.get-kurtosis-cdk-commit.outputs.kurtosis-commit }} - agglayer-e2e-ref: 64cedb5666ba92e9fb7b4c0a6798a18859ee3a72 + agglayer-e2e-ref: bc2ca8bca942a4dfc8ce11781dcfc590f2950a7e kurtosis-cdk-enclave-name: op aggsender-find-imported-bridge-artifact: aggsender_find_imported_bridge kurtosis-cdk-args-1: ${{ needs.read-aggkit-args.outputs.kurtosis-cdk-args-1 }} @@ -284,11 +284,11 @@ jobs: - build-tools - read-aggkit-args - get-kurtosis-cdk-commit - uses: agglayer/e2e/.github/workflows/aggkit-e2e-multi-chains.yml@64cedb5666ba92e9fb7b4c0a6798a18859ee3a72 + uses: agglayer/e2e/.github/workflows/aggkit-e2e-multi-chains.yml@bc2ca8bca942a4dfc8ce11781dcfc590f2950a7e secrets: inherit with: kurtosis-cdk-ref: ${{ needs.get-kurtosis-cdk-commit.outputs.kurtosis-commit }} - agglayer-e2e-ref: 64cedb5666ba92e9fb7b4c0a6798a18859ee3a72 + agglayer-e2e-ref: bc2ca8bca942a4dfc8ce11781dcfc590f2950a7e kurtosis-cdk-enclave-name: aggkit aggsender-find-imported-bridge-artifact: aggsender_find_imported_bridge kurtosis-cdk-args-1: ${{ needs.read-aggkit-args.outputs.kurtosis-cdk-args-3 }} From 3b3aa601690e7b7def58db89e7c9a06c5a1d3abe Mon Sep 17 00:00:00 2001 From: Arpit Temani Date: Fri, 9 Jan 2026 10:08:01 +0530 Subject: [PATCH 64/73] update ref --- .github/workflows/test-e2e.yml | 20 ++++++++++---------- test/run-local-e2e.sh | 14 +++++++------- 2 files changed, 17 insertions(+), 17 deletions(-) diff --git a/.github/workflows/test-e2e.yml b/.github/workflows/test-e2e.yml index 98337e34c..810096b4b 100644 --- a/.github/workflows/test-e2e.yml +++ b/.github/workflows/test-e2e.yml @@ -173,11 +173,11 @@ 
jobs: - build-tools - read-aggkit-args - get-kurtosis-cdk-commit - uses: agglayer/e2e/.github/workflows/aggkit-e2e-single-chain.yml@bc2ca8bca942a4dfc8ce11781dcfc590f2950a7e + uses: agglayer/e2e/.github/workflows/aggkit-e2e-single-chain.yml@0e664ec01cc4edde00a874a0b8fed5132fc3099c secrets: inherit with: kurtosis-cdk-ref: ${{ needs.get-kurtosis-cdk-commit.outputs.kurtosis-commit }} - agglayer-e2e-ref: bc2ca8bca942a4dfc8ce11781dcfc590f2950a7e + agglayer-e2e-ref: 0e664ec01cc4edde00a874a0b8fed5132fc3099c kurtosis-cdk-enclave-name: op kurtosis-cdk-args: ${{ needs.read-aggkit-args.outputs.kurtosis-cdk-args-single-op-pessimistic }} test-name: "test-single-l2-network-op-pessimistic" @@ -201,11 +201,11 @@ jobs: - build-tools - read-aggkit-args - get-kurtosis-cdk-commit - uses: agglayer/e2e/.github/workflows/aggkit-e2e-single-chain.yml@bc2ca8bca942a4dfc8ce11781dcfc590f2950a7e + uses: agglayer/e2e/.github/workflows/aggkit-e2e-single-chain.yml@0e664ec01cc4edde00a874a0b8fed5132fc3099c secrets: inherit with: kurtosis-cdk-ref: ${{ needs.get-kurtosis-cdk-commit.outputs.kurtosis-commit }} - agglayer-e2e-ref: bc2ca8bca942a4dfc8ce11781dcfc590f2950a7e + agglayer-e2e-ref: 0e664ec01cc4edde00a874a0b8fed5132fc3099c kurtosis-cdk-enclave-name: op kurtosis-cdk-args: ${{ needs.read-aggkit-args.outputs.kurtosis-cdk-args-single-op-succinct }} aggsender-find-imported-bridge-artifact: aggsender_find_imported_bridge @@ -229,12 +229,12 @@ jobs: - build-tools - read-aggkit-args - get-kurtosis-cdk-commit - uses: agglayer/e2e/.github/workflows/aggkit-e2e-single-chain.yml@bc2ca8bca942a4dfc8ce11781dcfc590f2950a7e + uses: agglayer/e2e/.github/workflows/aggkit-e2e-single-chain.yml@0e664ec01cc4edde00a874a0b8fed5132fc3099c if: always() && github.event_name == 'schedule' && github.ref == 'refs/heads/develop' secrets: inherit with: kurtosis-cdk-ref: ${{ needs.get-kurtosis-cdk-commit.outputs.kurtosis-commit }} - agglayer-e2e-ref: bc2ca8bca942a4dfc8ce11781dcfc590f2950a7e + agglayer-e2e-ref: 
0e664ec01cc4edde00a874a0b8fed5132fc3099c kurtosis-cdk-enclave-name: op kurtosis-cdk-args: ${{ needs.read-aggkit-args.outputs.kurtosis-cdk-args-single-op-succinct-aggoracle-committee }} test-name: "test-single-l2-network-op-succinct-aggoracle-committee" @@ -258,11 +258,11 @@ jobs: - build-tools - read-aggkit-args - get-kurtosis-cdk-commit - uses: agglayer/e2e/.github/workflows/aggkit-e2e-multi-chains.yml@bc2ca8bca942a4dfc8ce11781dcfc590f2950a7e + uses: agglayer/e2e/.github/workflows/aggkit-e2e-multi-chains.yml@0e664ec01cc4edde00a874a0b8fed5132fc3099c secrets: inherit with: kurtosis-cdk-ref: ${{ needs.get-kurtosis-cdk-commit.outputs.kurtosis-commit }} - agglayer-e2e-ref: bc2ca8bca942a4dfc8ce11781dcfc590f2950a7e + agglayer-e2e-ref: 0e664ec01cc4edde00a874a0b8fed5132fc3099c kurtosis-cdk-enclave-name: op aggsender-find-imported-bridge-artifact: aggsender_find_imported_bridge kurtosis-cdk-args-1: ${{ needs.read-aggkit-args.outputs.kurtosis-cdk-args-1 }} @@ -284,11 +284,11 @@ jobs: - build-tools - read-aggkit-args - get-kurtosis-cdk-commit - uses: agglayer/e2e/.github/workflows/aggkit-e2e-multi-chains.yml@bc2ca8bca942a4dfc8ce11781dcfc590f2950a7e + uses: agglayer/e2e/.github/workflows/aggkit-e2e-multi-chains.yml@0e664ec01cc4edde00a874a0b8fed5132fc3099c secrets: inherit with: kurtosis-cdk-ref: ${{ needs.get-kurtosis-cdk-commit.outputs.kurtosis-commit }} - agglayer-e2e-ref: bc2ca8bca942a4dfc8ce11781dcfc590f2950a7e + agglayer-e2e-ref: 0e664ec01cc4edde00a874a0b8fed5132fc3099c kurtosis-cdk-enclave-name: aggkit aggsender-find-imported-bridge-artifact: aggsender_find_imported_bridge kurtosis-cdk-args-1: ${{ needs.read-aggkit-args.outputs.kurtosis-cdk-args-3 }} diff --git a/test/run-local-e2e.sh b/test/run-local-e2e.sh index 2bd6e0a9c..a8ac71135 100755 --- a/test/run-local-e2e.sh +++ b/test/run-local-e2e.sh @@ -405,13 +405,13 @@ if [ "$E2E_REPO_PATH" != "-" ]; then bats ./tests/aggkit/bridge-e2e-aggoracle-committee.bats || exit 1 ;; single-l2-network-op-pessimistic) - # bats 
./tests/aggkit/bridge-e2e.bats || exit 1 - bats ./tests/aggkit/bridge-sovereign-chain-e2e.bats -f "Test backwardlet, forwardlet feature" || exit 1 - # bats ./tests/op/optimistic-mode.bats || exit 1 - # bats ./tests/aggkit/bridge-e2e-nightly.bats || exit 1 - # bats ./tests/aggkit/internal-claims.bats || exit 1 - # bats ./tests/aggkit/claim-reetrancy.bats || exit 1 - # bats ./tests/aggkit/aggsender-committee-updates.bats || exit 1 + bats ./tests/aggkit/bridge-e2e.bats || exit 1 + bats ./tests/aggkit/bridge-sovereign-chain-e2e.bats || exit 1 + bats ./tests/op/optimistic-mode.bats || exit 1 + bats ./tests/aggkit/bridge-e2e-nightly.bats || exit 1 + bats ./tests/aggkit/internal-claims.bats || exit 1 + bats ./tests/aggkit/claim-reetrancy.bats || exit 1 + bats ./tests/aggkit/aggsender-committee-updates.bats || exit 1 ;; multi-l2-networks-2-chains-op-pessimistic) bats ./tests/aggkit/bridge-e2e-2-chains.bats || exit 1 From dc758c7959ba5bf7ae14f8d7b7bbbee3f8ca828c Mon Sep 17 00:00:00 2001 From: Arpit Temani Date: Fri, 9 Jan 2026 10:30:15 +0530 Subject: [PATCH 65/73] update ref --- .github/workflows/test-e2e.yml | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/.github/workflows/test-e2e.yml b/.github/workflows/test-e2e.yml index 810096b4b..846340014 100644 --- a/.github/workflows/test-e2e.yml +++ b/.github/workflows/test-e2e.yml @@ -173,11 +173,11 @@ jobs: - build-tools - read-aggkit-args - get-kurtosis-cdk-commit - uses: agglayer/e2e/.github/workflows/aggkit-e2e-single-chain.yml@0e664ec01cc4edde00a874a0b8fed5132fc3099c + uses: agglayer/e2e/.github/workflows/aggkit-e2e-single-chain.yml@82ab9077363e43c7fd3fc1dcb54e2f4789215eb1 secrets: inherit with: kurtosis-cdk-ref: ${{ needs.get-kurtosis-cdk-commit.outputs.kurtosis-commit }} - agglayer-e2e-ref: 0e664ec01cc4edde00a874a0b8fed5132fc3099c + agglayer-e2e-ref: 82ab9077363e43c7fd3fc1dcb54e2f4789215eb1 kurtosis-cdk-enclave-name: op kurtosis-cdk-args: ${{ 
needs.read-aggkit-args.outputs.kurtosis-cdk-args-single-op-pessimistic }} test-name: "test-single-l2-network-op-pessimistic" @@ -201,11 +201,11 @@ jobs: - build-tools - read-aggkit-args - get-kurtosis-cdk-commit - uses: agglayer/e2e/.github/workflows/aggkit-e2e-single-chain.yml@0e664ec01cc4edde00a874a0b8fed5132fc3099c + uses: agglayer/e2e/.github/workflows/aggkit-e2e-single-chain.yml@82ab9077363e43c7fd3fc1dcb54e2f4789215eb1 secrets: inherit with: kurtosis-cdk-ref: ${{ needs.get-kurtosis-cdk-commit.outputs.kurtosis-commit }} - agglayer-e2e-ref: 0e664ec01cc4edde00a874a0b8fed5132fc3099c + agglayer-e2e-ref: 82ab9077363e43c7fd3fc1dcb54e2f4789215eb1 kurtosis-cdk-enclave-name: op kurtosis-cdk-args: ${{ needs.read-aggkit-args.outputs.kurtosis-cdk-args-single-op-succinct }} aggsender-find-imported-bridge-artifact: aggsender_find_imported_bridge @@ -229,12 +229,12 @@ jobs: - build-tools - read-aggkit-args - get-kurtosis-cdk-commit - uses: agglayer/e2e/.github/workflows/aggkit-e2e-single-chain.yml@0e664ec01cc4edde00a874a0b8fed5132fc3099c + uses: agglayer/e2e/.github/workflows/aggkit-e2e-single-chain.yml@82ab9077363e43c7fd3fc1dcb54e2f4789215eb1 if: always() && github.event_name == 'schedule' && github.ref == 'refs/heads/develop' secrets: inherit with: kurtosis-cdk-ref: ${{ needs.get-kurtosis-cdk-commit.outputs.kurtosis-commit }} - agglayer-e2e-ref: 0e664ec01cc4edde00a874a0b8fed5132fc3099c + agglayer-e2e-ref: 82ab9077363e43c7fd3fc1dcb54e2f4789215eb1 kurtosis-cdk-enclave-name: op kurtosis-cdk-args: ${{ needs.read-aggkit-args.outputs.kurtosis-cdk-args-single-op-succinct-aggoracle-committee }} test-name: "test-single-l2-network-op-succinct-aggoracle-committee" @@ -258,11 +258,11 @@ jobs: - build-tools - read-aggkit-args - get-kurtosis-cdk-commit - uses: agglayer/e2e/.github/workflows/aggkit-e2e-multi-chains.yml@0e664ec01cc4edde00a874a0b8fed5132fc3099c + uses: agglayer/e2e/.github/workflows/aggkit-e2e-multi-chains.yml@82ab9077363e43c7fd3fc1dcb54e2f4789215eb1 secrets: inherit with: 
kurtosis-cdk-ref: ${{ needs.get-kurtosis-cdk-commit.outputs.kurtosis-commit }} - agglayer-e2e-ref: 0e664ec01cc4edde00a874a0b8fed5132fc3099c + agglayer-e2e-ref: 82ab9077363e43c7fd3fc1dcb54e2f4789215eb1 kurtosis-cdk-enclave-name: op aggsender-find-imported-bridge-artifact: aggsender_find_imported_bridge kurtosis-cdk-args-1: ${{ needs.read-aggkit-args.outputs.kurtosis-cdk-args-1 }} @@ -284,11 +284,11 @@ jobs: - build-tools - read-aggkit-args - get-kurtosis-cdk-commit - uses: agglayer/e2e/.github/workflows/aggkit-e2e-multi-chains.yml@0e664ec01cc4edde00a874a0b8fed5132fc3099c + uses: agglayer/e2e/.github/workflows/aggkit-e2e-multi-chains.yml@82ab9077363e43c7fd3fc1dcb54e2f4789215eb1 secrets: inherit with: kurtosis-cdk-ref: ${{ needs.get-kurtosis-cdk-commit.outputs.kurtosis-commit }} - agglayer-e2e-ref: 0e664ec01cc4edde00a874a0b8fed5132fc3099c + agglayer-e2e-ref: 82ab9077363e43c7fd3fc1dcb54e2f4789215eb1 kurtosis-cdk-enclave-name: aggkit aggsender-find-imported-bridge-artifact: aggsender_find_imported_bridge kurtosis-cdk-args-1: ${{ needs.read-aggkit-args.outputs.kurtosis-cdk-args-3 }} From 1e64edb4c0b77946b5d4aa851331d61c1bc12fa4 Mon Sep 17 00:00:00 2001 From: Arpit Temani Date: Fri, 9 Jan 2026 15:22:53 +0530 Subject: [PATCH 66/73] update ref --- .github/workflows/test-e2e.yml | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/.github/workflows/test-e2e.yml b/.github/workflows/test-e2e.yml index 846340014..b5f538279 100644 --- a/.github/workflows/test-e2e.yml +++ b/.github/workflows/test-e2e.yml @@ -173,11 +173,11 @@ jobs: - build-tools - read-aggkit-args - get-kurtosis-cdk-commit - uses: agglayer/e2e/.github/workflows/aggkit-e2e-single-chain.yml@82ab9077363e43c7fd3fc1dcb54e2f4789215eb1 + uses: agglayer/e2e/.github/workflows/aggkit-e2e-single-chain.yml@e97583c88a065c6f8f81d7b32010e0bb64c1887c secrets: inherit with: kurtosis-cdk-ref: ${{ needs.get-kurtosis-cdk-commit.outputs.kurtosis-commit }} - agglayer-e2e-ref: 
82ab9077363e43c7fd3fc1dcb54e2f4789215eb1 + agglayer-e2e-ref: e97583c88a065c6f8f81d7b32010e0bb64c1887c kurtosis-cdk-enclave-name: op kurtosis-cdk-args: ${{ needs.read-aggkit-args.outputs.kurtosis-cdk-args-single-op-pessimistic }} test-name: "test-single-l2-network-op-pessimistic" @@ -201,11 +201,11 @@ jobs: - build-tools - read-aggkit-args - get-kurtosis-cdk-commit - uses: agglayer/e2e/.github/workflows/aggkit-e2e-single-chain.yml@82ab9077363e43c7fd3fc1dcb54e2f4789215eb1 + uses: agglayer/e2e/.github/workflows/aggkit-e2e-single-chain.yml@e97583c88a065c6f8f81d7b32010e0bb64c1887c secrets: inherit with: kurtosis-cdk-ref: ${{ needs.get-kurtosis-cdk-commit.outputs.kurtosis-commit }} - agglayer-e2e-ref: 82ab9077363e43c7fd3fc1dcb54e2f4789215eb1 + agglayer-e2e-ref: e97583c88a065c6f8f81d7b32010e0bb64c1887c kurtosis-cdk-enclave-name: op kurtosis-cdk-args: ${{ needs.read-aggkit-args.outputs.kurtosis-cdk-args-single-op-succinct }} aggsender-find-imported-bridge-artifact: aggsender_find_imported_bridge @@ -229,12 +229,12 @@ jobs: - build-tools - read-aggkit-args - get-kurtosis-cdk-commit - uses: agglayer/e2e/.github/workflows/aggkit-e2e-single-chain.yml@82ab9077363e43c7fd3fc1dcb54e2f4789215eb1 + uses: agglayer/e2e/.github/workflows/aggkit-e2e-single-chain.yml@e97583c88a065c6f8f81d7b32010e0bb64c1887c if: always() && github.event_name == 'schedule' && github.ref == 'refs/heads/develop' secrets: inherit with: kurtosis-cdk-ref: ${{ needs.get-kurtosis-cdk-commit.outputs.kurtosis-commit }} - agglayer-e2e-ref: 82ab9077363e43c7fd3fc1dcb54e2f4789215eb1 + agglayer-e2e-ref: e97583c88a065c6f8f81d7b32010e0bb64c1887c kurtosis-cdk-enclave-name: op kurtosis-cdk-args: ${{ needs.read-aggkit-args.outputs.kurtosis-cdk-args-single-op-succinct-aggoracle-committee }} test-name: "test-single-l2-network-op-succinct-aggoracle-committee" @@ -258,11 +258,11 @@ jobs: - build-tools - read-aggkit-args - get-kurtosis-cdk-commit - uses: 
agglayer/e2e/.github/workflows/aggkit-e2e-multi-chains.yml@82ab9077363e43c7fd3fc1dcb54e2f4789215eb1 + uses: agglayer/e2e/.github/workflows/aggkit-e2e-multi-chains.yml@e97583c88a065c6f8f81d7b32010e0bb64c1887c secrets: inherit with: kurtosis-cdk-ref: ${{ needs.get-kurtosis-cdk-commit.outputs.kurtosis-commit }} - agglayer-e2e-ref: 82ab9077363e43c7fd3fc1dcb54e2f4789215eb1 + agglayer-e2e-ref: e97583c88a065c6f8f81d7b32010e0bb64c1887c kurtosis-cdk-enclave-name: op aggsender-find-imported-bridge-artifact: aggsender_find_imported_bridge kurtosis-cdk-args-1: ${{ needs.read-aggkit-args.outputs.kurtosis-cdk-args-1 }} @@ -284,11 +284,11 @@ jobs: - build-tools - read-aggkit-args - get-kurtosis-cdk-commit - uses: agglayer/e2e/.github/workflows/aggkit-e2e-multi-chains.yml@82ab9077363e43c7fd3fc1dcb54e2f4789215eb1 + uses: agglayer/e2e/.github/workflows/aggkit-e2e-multi-chains.yml@e97583c88a065c6f8f81d7b32010e0bb64c1887c secrets: inherit with: kurtosis-cdk-ref: ${{ needs.get-kurtosis-cdk-commit.outputs.kurtosis-commit }} - agglayer-e2e-ref: 82ab9077363e43c7fd3fc1dcb54e2f4789215eb1 + agglayer-e2e-ref: e97583c88a065c6f8f81d7b32010e0bb64c1887c kurtosis-cdk-enclave-name: aggkit aggsender-find-imported-bridge-artifact: aggsender_find_imported_bridge kurtosis-cdk-args-1: ${{ needs.read-aggkit-args.outputs.kurtosis-cdk-args-3 }} From 825293b205150d58b446bdf56a474b36af1c9b67 Mon Sep 17 00:00:00 2001 From: Arpit Temani Date: Sat, 10 Jan 2026 11:01:47 +0530 Subject: [PATCH 67/73] update ref --- .github/test_e2e_cdk_erigon_args_base.json | 2 +- .github/test_e2e_op_args_base.json | 2 +- .github/workflows/test-e2e.yml | 22 +++++++++++----------- 3 files changed, 13 insertions(+), 13 deletions(-) diff --git a/.github/test_e2e_cdk_erigon_args_base.json b/.github/test_e2e_cdk_erigon_args_base.json index 25dced5d3..1e6d027d0 100644 --- a/.github/test_e2e_cdk_erigon_args_base.json +++ b/.github/test_e2e_cdk_erigon_args_base.json @@ -10,7 +10,7 @@ "agg_sender_multisig_threshold": 1, 
"agg_sender_validator_total_number": 0, "binary_name": "aggkit", - "log_level": "debug", + "log_level": "info", "aggkit_components": "aggsender", "aggkit_image": "aggkit:local", "agglayer_prover_primary_prover": "mock-prover", diff --git a/.github/test_e2e_op_args_base.json b/.github/test_e2e_op_args_base.json index d82314258..209b2c866 100644 --- a/.github/test_e2e_op_args_base.json +++ b/.github/test_e2e_op_args_base.json @@ -9,7 +9,7 @@ "use_agg_sender_validator": true, "agg_sender_multisig_threshold": 2, "agg_sender_validator_total_number": 3, - "log_level": "debug", + "log_level": "info", "additional_services": [], "binary_name": "aggkit", "aggkit_components": "aggsender,aggoracle", diff --git a/.github/workflows/test-e2e.yml b/.github/workflows/test-e2e.yml index b5f538279..3663320d7 100644 --- a/.github/workflows/test-e2e.yml +++ b/.github/workflows/test-e2e.yml @@ -18,7 +18,7 @@ concurrency: env: AGGKIT_REPORT_CHANNEL: "C092K6Z0EUT" - KURTOSIS_CDK_COMMIT: "507e4e8e6581ab5ecbb44444f9b7f1d05e3dcd1c" + KURTOSIS_CDK_COMMIT: "e6a2ea5f11b525233501d673119a404d86b6c8d4" permissions: packages: write @@ -173,11 +173,11 @@ jobs: - build-tools - read-aggkit-args - get-kurtosis-cdk-commit - uses: agglayer/e2e/.github/workflows/aggkit-e2e-single-chain.yml@e97583c88a065c6f8f81d7b32010e0bb64c1887c + uses: agglayer/e2e/.github/workflows/aggkit-e2e-single-chain.yml@88a4ffe04db1c5148f1b29b82a615825c7d83c7e secrets: inherit with: kurtosis-cdk-ref: ${{ needs.get-kurtosis-cdk-commit.outputs.kurtosis-commit }} - agglayer-e2e-ref: e97583c88a065c6f8f81d7b32010e0bb64c1887c + agglayer-e2e-ref: 88a4ffe04db1c5148f1b29b82a615825c7d83c7e kurtosis-cdk-enclave-name: op kurtosis-cdk-args: ${{ needs.read-aggkit-args.outputs.kurtosis-cdk-args-single-op-pessimistic }} test-name: "test-single-l2-network-op-pessimistic" @@ -201,11 +201,11 @@ jobs: - build-tools - read-aggkit-args - get-kurtosis-cdk-commit - uses: 
agglayer/e2e/.github/workflows/aggkit-e2e-single-chain.yml@e97583c88a065c6f8f81d7b32010e0bb64c1887c + uses: agglayer/e2e/.github/workflows/aggkit-e2e-single-chain.yml@88a4ffe04db1c5148f1b29b82a615825c7d83c7e secrets: inherit with: kurtosis-cdk-ref: ${{ needs.get-kurtosis-cdk-commit.outputs.kurtosis-commit }} - agglayer-e2e-ref: e97583c88a065c6f8f81d7b32010e0bb64c1887c + agglayer-e2e-ref: 88a4ffe04db1c5148f1b29b82a615825c7d83c7e kurtosis-cdk-enclave-name: op kurtosis-cdk-args: ${{ needs.read-aggkit-args.outputs.kurtosis-cdk-args-single-op-succinct }} aggsender-find-imported-bridge-artifact: aggsender_find_imported_bridge @@ -229,12 +229,12 @@ jobs: - build-tools - read-aggkit-args - get-kurtosis-cdk-commit - uses: agglayer/e2e/.github/workflows/aggkit-e2e-single-chain.yml@e97583c88a065c6f8f81d7b32010e0bb64c1887c + uses: agglayer/e2e/.github/workflows/aggkit-e2e-single-chain.yml@88a4ffe04db1c5148f1b29b82a615825c7d83c7e if: always() && github.event_name == 'schedule' && github.ref == 'refs/heads/develop' secrets: inherit with: kurtosis-cdk-ref: ${{ needs.get-kurtosis-cdk-commit.outputs.kurtosis-commit }} - agglayer-e2e-ref: e97583c88a065c6f8f81d7b32010e0bb64c1887c + agglayer-e2e-ref: 88a4ffe04db1c5148f1b29b82a615825c7d83c7e kurtosis-cdk-enclave-name: op kurtosis-cdk-args: ${{ needs.read-aggkit-args.outputs.kurtosis-cdk-args-single-op-succinct-aggoracle-committee }} test-name: "test-single-l2-network-op-succinct-aggoracle-committee" @@ -258,11 +258,11 @@ jobs: - build-tools - read-aggkit-args - get-kurtosis-cdk-commit - uses: agglayer/e2e/.github/workflows/aggkit-e2e-multi-chains.yml@e97583c88a065c6f8f81d7b32010e0bb64c1887c + uses: agglayer/e2e/.github/workflows/aggkit-e2e-multi-chains.yml@88a4ffe04db1c5148f1b29b82a615825c7d83c7e secrets: inherit with: kurtosis-cdk-ref: ${{ needs.get-kurtosis-cdk-commit.outputs.kurtosis-commit }} - agglayer-e2e-ref: e97583c88a065c6f8f81d7b32010e0bb64c1887c + agglayer-e2e-ref: 88a4ffe04db1c5148f1b29b82a615825c7d83c7e 
kurtosis-cdk-enclave-name: op aggsender-find-imported-bridge-artifact: aggsender_find_imported_bridge kurtosis-cdk-args-1: ${{ needs.read-aggkit-args.outputs.kurtosis-cdk-args-1 }} @@ -284,11 +284,11 @@ jobs: - build-tools - read-aggkit-args - get-kurtosis-cdk-commit - uses: agglayer/e2e/.github/workflows/aggkit-e2e-multi-chains.yml@e97583c88a065c6f8f81d7b32010e0bb64c1887c + uses: agglayer/e2e/.github/workflows/aggkit-e2e-multi-chains.yml@88a4ffe04db1c5148f1b29b82a615825c7d83c7e secrets: inherit with: kurtosis-cdk-ref: ${{ needs.get-kurtosis-cdk-commit.outputs.kurtosis-commit }} - agglayer-e2e-ref: e97583c88a065c6f8f81d7b32010e0bb64c1887c + agglayer-e2e-ref: 88a4ffe04db1c5148f1b29b82a615825c7d83c7e kurtosis-cdk-enclave-name: aggkit aggsender-find-imported-bridge-artifact: aggsender_find_imported_bridge kurtosis-cdk-args-1: ${{ needs.read-aggkit-args.outputs.kurtosis-cdk-args-3 }} From ccd81db71f8f4befea73909e15055ca952062ea5 Mon Sep 17 00:00:00 2001 From: Arpit Temani Date: Sat, 10 Jan 2026 11:22:59 +0530 Subject: [PATCH 68/73] update ref --- .github/workflows/test-e2e.yml | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/.github/workflows/test-e2e.yml b/.github/workflows/test-e2e.yml index 3663320d7..b4def20a1 100644 --- a/.github/workflows/test-e2e.yml +++ b/.github/workflows/test-e2e.yml @@ -173,11 +173,11 @@ jobs: - build-tools - read-aggkit-args - get-kurtosis-cdk-commit - uses: agglayer/e2e/.github/workflows/aggkit-e2e-single-chain.yml@88a4ffe04db1c5148f1b29b82a615825c7d83c7e + uses: agglayer/e2e/.github/workflows/aggkit-e2e-single-chain.yml@45891d12f326003b171e868951d96d50df8810f5 secrets: inherit with: kurtosis-cdk-ref: ${{ needs.get-kurtosis-cdk-commit.outputs.kurtosis-commit }} - agglayer-e2e-ref: 88a4ffe04db1c5148f1b29b82a615825c7d83c7e + agglayer-e2e-ref: 45891d12f326003b171e868951d96d50df8810f5 kurtosis-cdk-enclave-name: op kurtosis-cdk-args: ${{ 
needs.read-aggkit-args.outputs.kurtosis-cdk-args-single-op-pessimistic }} test-name: "test-single-l2-network-op-pessimistic" @@ -201,11 +201,11 @@ jobs: - build-tools - read-aggkit-args - get-kurtosis-cdk-commit - uses: agglayer/e2e/.github/workflows/aggkit-e2e-single-chain.yml@88a4ffe04db1c5148f1b29b82a615825c7d83c7e + uses: agglayer/e2e/.github/workflows/aggkit-e2e-single-chain.yml@45891d12f326003b171e868951d96d50df8810f5 secrets: inherit with: kurtosis-cdk-ref: ${{ needs.get-kurtosis-cdk-commit.outputs.kurtosis-commit }} - agglayer-e2e-ref: 88a4ffe04db1c5148f1b29b82a615825c7d83c7e + agglayer-e2e-ref: 45891d12f326003b171e868951d96d50df8810f5 kurtosis-cdk-enclave-name: op kurtosis-cdk-args: ${{ needs.read-aggkit-args.outputs.kurtosis-cdk-args-single-op-succinct }} aggsender-find-imported-bridge-artifact: aggsender_find_imported_bridge @@ -229,12 +229,12 @@ jobs: - build-tools - read-aggkit-args - get-kurtosis-cdk-commit - uses: agglayer/e2e/.github/workflows/aggkit-e2e-single-chain.yml@88a4ffe04db1c5148f1b29b82a615825c7d83c7e + uses: agglayer/e2e/.github/workflows/aggkit-e2e-single-chain.yml@45891d12f326003b171e868951d96d50df8810f5 if: always() && github.event_name == 'schedule' && github.ref == 'refs/heads/develop' secrets: inherit with: kurtosis-cdk-ref: ${{ needs.get-kurtosis-cdk-commit.outputs.kurtosis-commit }} - agglayer-e2e-ref: 88a4ffe04db1c5148f1b29b82a615825c7d83c7e + agglayer-e2e-ref: 45891d12f326003b171e868951d96d50df8810f5 kurtosis-cdk-enclave-name: op kurtosis-cdk-args: ${{ needs.read-aggkit-args.outputs.kurtosis-cdk-args-single-op-succinct-aggoracle-committee }} test-name: "test-single-l2-network-op-succinct-aggoracle-committee" @@ -258,11 +258,11 @@ jobs: - build-tools - read-aggkit-args - get-kurtosis-cdk-commit - uses: agglayer/e2e/.github/workflows/aggkit-e2e-multi-chains.yml@88a4ffe04db1c5148f1b29b82a615825c7d83c7e + uses: agglayer/e2e/.github/workflows/aggkit-e2e-multi-chains.yml@45891d12f326003b171e868951d96d50df8810f5 secrets: inherit with: 
kurtosis-cdk-ref: ${{ needs.get-kurtosis-cdk-commit.outputs.kurtosis-commit }} - agglayer-e2e-ref: 88a4ffe04db1c5148f1b29b82a615825c7d83c7e + agglayer-e2e-ref: 45891d12f326003b171e868951d96d50df8810f5 kurtosis-cdk-enclave-name: op aggsender-find-imported-bridge-artifact: aggsender_find_imported_bridge kurtosis-cdk-args-1: ${{ needs.read-aggkit-args.outputs.kurtosis-cdk-args-1 }} @@ -284,11 +284,11 @@ jobs: - build-tools - read-aggkit-args - get-kurtosis-cdk-commit - uses: agglayer/e2e/.github/workflows/aggkit-e2e-multi-chains.yml@88a4ffe04db1c5148f1b29b82a615825c7d83c7e + uses: agglayer/e2e/.github/workflows/aggkit-e2e-multi-chains.yml@45891d12f326003b171e868951d96d50df8810f5 secrets: inherit with: kurtosis-cdk-ref: ${{ needs.get-kurtosis-cdk-commit.outputs.kurtosis-commit }} - agglayer-e2e-ref: 88a4ffe04db1c5148f1b29b82a615825c7d83c7e + agglayer-e2e-ref: 45891d12f326003b171e868951d96d50df8810f5 kurtosis-cdk-enclave-name: aggkit aggsender-find-imported-bridge-artifact: aggsender_find_imported_bridge kurtosis-cdk-args-1: ${{ needs.read-aggkit-args.outputs.kurtosis-cdk-args-3 }} From aabeb3b396477dfac826f3a24e5739ac503cc4e5 Mon Sep 17 00:00:00 2001 From: Arpit Temani Date: Tue, 13 Jan 2026 13:39:34 +0530 Subject: [PATCH 69/73] update ref --- .github/workflows/test-e2e.yml | 22 +++++++++++----------- test/run-local-e2e.sh | 14 +++++++------- 2 files changed, 18 insertions(+), 18 deletions(-) diff --git a/.github/workflows/test-e2e.yml b/.github/workflows/test-e2e.yml index b4def20a1..5a795c19a 100644 --- a/.github/workflows/test-e2e.yml +++ b/.github/workflows/test-e2e.yml @@ -18,7 +18,7 @@ concurrency: env: AGGKIT_REPORT_CHANNEL: "C092K6Z0EUT" - KURTOSIS_CDK_COMMIT: "e6a2ea5f11b525233501d673119a404d86b6c8d4" + KURTOSIS_CDK_COMMIT: "b74bdc657351238aa57ca7b7ae3aa35bed487b78" permissions: packages: write @@ -173,11 +173,11 @@ jobs: - build-tools - read-aggkit-args - get-kurtosis-cdk-commit - uses: 
agglayer/e2e/.github/workflows/aggkit-e2e-single-chain.yml@45891d12f326003b171e868951d96d50df8810f5 + uses: agglayer/e2e/.github/workflows/aggkit-e2e-single-chain.yml@4c599f56f27a0407d749bbbfbc4c6eda214c29e3 secrets: inherit with: kurtosis-cdk-ref: ${{ needs.get-kurtosis-cdk-commit.outputs.kurtosis-commit }} - agglayer-e2e-ref: 45891d12f326003b171e868951d96d50df8810f5 + agglayer-e2e-ref: 4c599f56f27a0407d749bbbfbc4c6eda214c29e3 kurtosis-cdk-enclave-name: op kurtosis-cdk-args: ${{ needs.read-aggkit-args.outputs.kurtosis-cdk-args-single-op-pessimistic }} test-name: "test-single-l2-network-op-pessimistic" @@ -201,11 +201,11 @@ jobs: - build-tools - read-aggkit-args - get-kurtosis-cdk-commit - uses: agglayer/e2e/.github/workflows/aggkit-e2e-single-chain.yml@45891d12f326003b171e868951d96d50df8810f5 + uses: agglayer/e2e/.github/workflows/aggkit-e2e-single-chain.yml@4c599f56f27a0407d749bbbfbc4c6eda214c29e3 secrets: inherit with: kurtosis-cdk-ref: ${{ needs.get-kurtosis-cdk-commit.outputs.kurtosis-commit }} - agglayer-e2e-ref: 45891d12f326003b171e868951d96d50df8810f5 + agglayer-e2e-ref: 4c599f56f27a0407d749bbbfbc4c6eda214c29e3 kurtosis-cdk-enclave-name: op kurtosis-cdk-args: ${{ needs.read-aggkit-args.outputs.kurtosis-cdk-args-single-op-succinct }} aggsender-find-imported-bridge-artifact: aggsender_find_imported_bridge @@ -229,12 +229,12 @@ jobs: - build-tools - read-aggkit-args - get-kurtosis-cdk-commit - uses: agglayer/e2e/.github/workflows/aggkit-e2e-single-chain.yml@45891d12f326003b171e868951d96d50df8810f5 + uses: agglayer/e2e/.github/workflows/aggkit-e2e-single-chain.yml@4c599f56f27a0407d749bbbfbc4c6eda214c29e3 if: always() && github.event_name == 'schedule' && github.ref == 'refs/heads/develop' secrets: inherit with: kurtosis-cdk-ref: ${{ needs.get-kurtosis-cdk-commit.outputs.kurtosis-commit }} - agglayer-e2e-ref: 45891d12f326003b171e868951d96d50df8810f5 + agglayer-e2e-ref: 4c599f56f27a0407d749bbbfbc4c6eda214c29e3 kurtosis-cdk-enclave-name: op kurtosis-cdk-args: ${{ 
needs.read-aggkit-args.outputs.kurtosis-cdk-args-single-op-succinct-aggoracle-committee }} test-name: "test-single-l2-network-op-succinct-aggoracle-committee" @@ -258,11 +258,11 @@ jobs: - build-tools - read-aggkit-args - get-kurtosis-cdk-commit - uses: agglayer/e2e/.github/workflows/aggkit-e2e-multi-chains.yml@45891d12f326003b171e868951d96d50df8810f5 + uses: agglayer/e2e/.github/workflows/aggkit-e2e-multi-chains.yml@4c599f56f27a0407d749bbbfbc4c6eda214c29e3 secrets: inherit with: kurtosis-cdk-ref: ${{ needs.get-kurtosis-cdk-commit.outputs.kurtosis-commit }} - agglayer-e2e-ref: 45891d12f326003b171e868951d96d50df8810f5 + agglayer-e2e-ref: 4c599f56f27a0407d749bbbfbc4c6eda214c29e3 kurtosis-cdk-enclave-name: op aggsender-find-imported-bridge-artifact: aggsender_find_imported_bridge kurtosis-cdk-args-1: ${{ needs.read-aggkit-args.outputs.kurtosis-cdk-args-1 }} @@ -284,11 +284,11 @@ jobs: - build-tools - read-aggkit-args - get-kurtosis-cdk-commit - uses: agglayer/e2e/.github/workflows/aggkit-e2e-multi-chains.yml@45891d12f326003b171e868951d96d50df8810f5 + uses: agglayer/e2e/.github/workflows/aggkit-e2e-multi-chains.yml@4c599f56f27a0407d749bbbfbc4c6eda214c29e3 secrets: inherit with: kurtosis-cdk-ref: ${{ needs.get-kurtosis-cdk-commit.outputs.kurtosis-commit }} - agglayer-e2e-ref: 45891d12f326003b171e868951d96d50df8810f5 + agglayer-e2e-ref: 4c599f56f27a0407d749bbbfbc4c6eda214c29e3 kurtosis-cdk-enclave-name: aggkit aggsender-find-imported-bridge-artifact: aggsender_find_imported_bridge kurtosis-cdk-args-1: ${{ needs.read-aggkit-args.outputs.kurtosis-cdk-args-3 }} diff --git a/test/run-local-e2e.sh b/test/run-local-e2e.sh index a8ac71135..52142fb3d 100755 --- a/test/run-local-e2e.sh +++ b/test/run-local-e2e.sh @@ -405,13 +405,13 @@ if [ "$E2E_REPO_PATH" != "-" ]; then bats ./tests/aggkit/bridge-e2e-aggoracle-committee.bats || exit 1 ;; single-l2-network-op-pessimistic) - bats ./tests/aggkit/bridge-e2e.bats || exit 1 - bats ./tests/aggkit/bridge-sovereign-chain-e2e.bats || exit 
1 - bats ./tests/op/optimistic-mode.bats || exit 1 - bats ./tests/aggkit/bridge-e2e-nightly.bats || exit 1 - bats ./tests/aggkit/internal-claims.bats || exit 1 - bats ./tests/aggkit/claim-reetrancy.bats || exit 1 - bats ./tests/aggkit/aggsender-committee-updates.bats || exit 1 + # bats ./tests/aggkit/bridge-e2e.bats || exit 1 + bats ./tests/aggkit/bridge-sovereign-chain-e2e.bats -f "Test backwardLET with reorg scenarios" || exit 1 + # bats ./tests/op/optimistic-mode.bats || exit 1 + # bats ./tests/aggkit/bridge-e2e-nightly.bats || exit 1 + # bats ./tests/aggkit/internal-claims.bats || exit 1 + # bats ./tests/aggkit/claim-reetrancy.bats || exit 1 + # bats ./tests/aggkit/aggsender-committee-updates.bats || exit 1 ;; multi-l2-networks-2-chains-op-pessimistic) bats ./tests/aggkit/bridge-e2e-2-chains.bats || exit 1 From 7ff6314df314fa74ec9c4d705e863e3068d4a5f5 Mon Sep 17 00:00:00 2001 From: Arpit Temani Date: Thu, 15 Jan 2026 13:14:59 +0530 Subject: [PATCH 70/73] update ref --- .github/workflows/test-e2e.yml | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/.github/workflows/test-e2e.yml b/.github/workflows/test-e2e.yml index 5a795c19a..f73eafefb 100644 --- a/.github/workflows/test-e2e.yml +++ b/.github/workflows/test-e2e.yml @@ -18,7 +18,7 @@ concurrency: env: AGGKIT_REPORT_CHANNEL: "C092K6Z0EUT" - KURTOSIS_CDK_COMMIT: "b74bdc657351238aa57ca7b7ae3aa35bed487b78" + KURTOSIS_CDK_COMMIT: "b5bd6db3475e27461dbb081f09f47fc398d63439" permissions: packages: write @@ -173,11 +173,11 @@ jobs: - build-tools - read-aggkit-args - get-kurtosis-cdk-commit - uses: agglayer/e2e/.github/workflows/aggkit-e2e-single-chain.yml@4c599f56f27a0407d749bbbfbc4c6eda214c29e3 + uses: agglayer/e2e/.github/workflows/aggkit-e2e-single-chain.yml@ce7f75e8d67c774fd8afdd9effae38f90ee41024 secrets: inherit with: kurtosis-cdk-ref: ${{ needs.get-kurtosis-cdk-commit.outputs.kurtosis-commit }} - agglayer-e2e-ref: 4c599f56f27a0407d749bbbfbc4c6eda214c29e3 + 
agglayer-e2e-ref: ce7f75e8d67c774fd8afdd9effae38f90ee41024 kurtosis-cdk-enclave-name: op kurtosis-cdk-args: ${{ needs.read-aggkit-args.outputs.kurtosis-cdk-args-single-op-pessimistic }} test-name: "test-single-l2-network-op-pessimistic" @@ -201,11 +201,11 @@ jobs: - build-tools - read-aggkit-args - get-kurtosis-cdk-commit - uses: agglayer/e2e/.github/workflows/aggkit-e2e-single-chain.yml@4c599f56f27a0407d749bbbfbc4c6eda214c29e3 + uses: agglayer/e2e/.github/workflows/aggkit-e2e-single-chain.yml@ce7f75e8d67c774fd8afdd9effae38f90ee41024 secrets: inherit with: kurtosis-cdk-ref: ${{ needs.get-kurtosis-cdk-commit.outputs.kurtosis-commit }} - agglayer-e2e-ref: 4c599f56f27a0407d749bbbfbc4c6eda214c29e3 + agglayer-e2e-ref: ce7f75e8d67c774fd8afdd9effae38f90ee41024 kurtosis-cdk-enclave-name: op kurtosis-cdk-args: ${{ needs.read-aggkit-args.outputs.kurtosis-cdk-args-single-op-succinct }} aggsender-find-imported-bridge-artifact: aggsender_find_imported_bridge @@ -229,12 +229,12 @@ jobs: - build-tools - read-aggkit-args - get-kurtosis-cdk-commit - uses: agglayer/e2e/.github/workflows/aggkit-e2e-single-chain.yml@4c599f56f27a0407d749bbbfbc4c6eda214c29e3 + uses: agglayer/e2e/.github/workflows/aggkit-e2e-single-chain.yml@ce7f75e8d67c774fd8afdd9effae38f90ee41024 if: always() && github.event_name == 'schedule' && github.ref == 'refs/heads/develop' secrets: inherit with: kurtosis-cdk-ref: ${{ needs.get-kurtosis-cdk-commit.outputs.kurtosis-commit }} - agglayer-e2e-ref: 4c599f56f27a0407d749bbbfbc4c6eda214c29e3 + agglayer-e2e-ref: ce7f75e8d67c774fd8afdd9effae38f90ee41024 kurtosis-cdk-enclave-name: op kurtosis-cdk-args: ${{ needs.read-aggkit-args.outputs.kurtosis-cdk-args-single-op-succinct-aggoracle-committee }} test-name: "test-single-l2-network-op-succinct-aggoracle-committee" @@ -258,11 +258,11 @@ jobs: - build-tools - read-aggkit-args - get-kurtosis-cdk-commit - uses: agglayer/e2e/.github/workflows/aggkit-e2e-multi-chains.yml@4c599f56f27a0407d749bbbfbc4c6eda214c29e3 + uses: 
agglayer/e2e/.github/workflows/aggkit-e2e-multi-chains.yml@ce7f75e8d67c774fd8afdd9effae38f90ee41024 secrets: inherit with: kurtosis-cdk-ref: ${{ needs.get-kurtosis-cdk-commit.outputs.kurtosis-commit }} - agglayer-e2e-ref: 4c599f56f27a0407d749bbbfbc4c6eda214c29e3 + agglayer-e2e-ref: ce7f75e8d67c774fd8afdd9effae38f90ee41024 kurtosis-cdk-enclave-name: op aggsender-find-imported-bridge-artifact: aggsender_find_imported_bridge kurtosis-cdk-args-1: ${{ needs.read-aggkit-args.outputs.kurtosis-cdk-args-1 }} @@ -284,11 +284,11 @@ jobs: - build-tools - read-aggkit-args - get-kurtosis-cdk-commit - uses: agglayer/e2e/.github/workflows/aggkit-e2e-multi-chains.yml@4c599f56f27a0407d749bbbfbc4c6eda214c29e3 + uses: agglayer/e2e/.github/workflows/aggkit-e2e-multi-chains.yml@ce7f75e8d67c774fd8afdd9effae38f90ee41024 secrets: inherit with: kurtosis-cdk-ref: ${{ needs.get-kurtosis-cdk-commit.outputs.kurtosis-commit }} - agglayer-e2e-ref: 4c599f56f27a0407d749bbbfbc4c6eda214c29e3 + agglayer-e2e-ref: ce7f75e8d67c774fd8afdd9effae38f90ee41024 kurtosis-cdk-enclave-name: aggkit aggsender-find-imported-bridge-artifact: aggsender_find_imported_bridge kurtosis-cdk-args-1: ${{ needs.read-aggkit-args.outputs.kurtosis-cdk-args-3 }} From d700fd8a5233c43f311e9bab60c0b663eeac4925 Mon Sep 17 00:00:00 2001 From: Arpit Temani Date: Thu, 15 Jan 2026 13:21:03 +0530 Subject: [PATCH 71/73] update ref --- .github/test_e2e_cdk_erigon_args_base.json | 2 +- .github/test_e2e_op_args_base.json | 2 +- .github/workflows/test-e2e.yml | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/test_e2e_cdk_erigon_args_base.json b/.github/test_e2e_cdk_erigon_args_base.json index 1e6d027d0..fee321dab 100644 --- a/.github/test_e2e_cdk_erigon_args_base.json +++ b/.github/test_e2e_cdk_erigon_args_base.json @@ -2,7 +2,7 @@ "deployment_stages": { "deploy_l2_contracts": true, "deploy_op_succinct": false, - "deploy_cdk_bridge_infra": false + "deploy_zkevm_bridge_service": false }, "args": { 
"consensus_contract_type": "ecdsa-multisig", diff --git a/.github/test_e2e_op_args_base.json b/.github/test_e2e_op_args_base.json index 209b2c866..586685bb6 100644 --- a/.github/test_e2e_op_args_base.json +++ b/.github/test_e2e_op_args_base.json @@ -1,7 +1,7 @@ { "deployment_stages": { "deploy_op_succinct": false, - "deploy_cdk_bridge_infra": true + "deploy_zkevm_bridge_service": true }, "args": { "aggkit_image": "aggkit:local", diff --git a/.github/workflows/test-e2e.yml b/.github/workflows/test-e2e.yml index f73eafefb..f255f81c8 100644 --- a/.github/workflows/test-e2e.yml +++ b/.github/workflows/test-e2e.yml @@ -18,7 +18,7 @@ concurrency: env: AGGKIT_REPORT_CHANNEL: "C092K6Z0EUT" - KURTOSIS_CDK_COMMIT: "b5bd6db3475e27461dbb081f09f47fc398d63439" + KURTOSIS_CDK_COMMIT: "a19cb8a3038112bd459a938e2972d43dbac325ed" permissions: packages: write From 0656fbbb05c6d6c1afe3c9aba66aeaad6dfaba98 Mon Sep 17 00:00:00 2001 From: Arpit Temani Date: Thu, 15 Jan 2026 14:08:32 +0530 Subject: [PATCH 72/73] update ref --- .github/workflows/test-e2e.yml | 20 ++++++++++---------- test/run-local-e2e.sh | 2 +- 2 files changed, 11 insertions(+), 11 deletions(-) diff --git a/.github/workflows/test-e2e.yml b/.github/workflows/test-e2e.yml index f255f81c8..89bc9f723 100644 --- a/.github/workflows/test-e2e.yml +++ b/.github/workflows/test-e2e.yml @@ -173,11 +173,11 @@ jobs: - build-tools - read-aggkit-args - get-kurtosis-cdk-commit - uses: agglayer/e2e/.github/workflows/aggkit-e2e-single-chain.yml@ce7f75e8d67c774fd8afdd9effae38f90ee41024 + uses: agglayer/e2e/.github/workflows/aggkit-e2e-single-chain.yml@e6a82374d52253099b92db12dcb86c6d92f4cfe1 secrets: inherit with: kurtosis-cdk-ref: ${{ needs.get-kurtosis-cdk-commit.outputs.kurtosis-commit }} - agglayer-e2e-ref: ce7f75e8d67c774fd8afdd9effae38f90ee41024 + agglayer-e2e-ref: e6a82374d52253099b92db12dcb86c6d92f4cfe1 kurtosis-cdk-enclave-name: op kurtosis-cdk-args: ${{ needs.read-aggkit-args.outputs.kurtosis-cdk-args-single-op-pessimistic }} 
test-name: "test-single-l2-network-op-pessimistic" @@ -201,11 +201,11 @@ jobs: - build-tools - read-aggkit-args - get-kurtosis-cdk-commit - uses: agglayer/e2e/.github/workflows/aggkit-e2e-single-chain.yml@ce7f75e8d67c774fd8afdd9effae38f90ee41024 + uses: agglayer/e2e/.github/workflows/aggkit-e2e-single-chain.yml@e6a82374d52253099b92db12dcb86c6d92f4cfe1 secrets: inherit with: kurtosis-cdk-ref: ${{ needs.get-kurtosis-cdk-commit.outputs.kurtosis-commit }} - agglayer-e2e-ref: ce7f75e8d67c774fd8afdd9effae38f90ee41024 + agglayer-e2e-ref: e6a82374d52253099b92db12dcb86c6d92f4cfe1 kurtosis-cdk-enclave-name: op kurtosis-cdk-args: ${{ needs.read-aggkit-args.outputs.kurtosis-cdk-args-single-op-succinct }} aggsender-find-imported-bridge-artifact: aggsender_find_imported_bridge @@ -229,12 +229,12 @@ jobs: - build-tools - read-aggkit-args - get-kurtosis-cdk-commit - uses: agglayer/e2e/.github/workflows/aggkit-e2e-single-chain.yml@ce7f75e8d67c774fd8afdd9effae38f90ee41024 + uses: agglayer/e2e/.github/workflows/aggkit-e2e-single-chain.yml@e6a82374d52253099b92db12dcb86c6d92f4cfe1 if: always() && github.event_name == 'schedule' && github.ref == 'refs/heads/develop' secrets: inherit with: kurtosis-cdk-ref: ${{ needs.get-kurtosis-cdk-commit.outputs.kurtosis-commit }} - agglayer-e2e-ref: ce7f75e8d67c774fd8afdd9effae38f90ee41024 + agglayer-e2e-ref: e6a82374d52253099b92db12dcb86c6d92f4cfe1 kurtosis-cdk-enclave-name: op kurtosis-cdk-args: ${{ needs.read-aggkit-args.outputs.kurtosis-cdk-args-single-op-succinct-aggoracle-committee }} test-name: "test-single-l2-network-op-succinct-aggoracle-committee" @@ -258,11 +258,11 @@ jobs: - build-tools - read-aggkit-args - get-kurtosis-cdk-commit - uses: agglayer/e2e/.github/workflows/aggkit-e2e-multi-chains.yml@ce7f75e8d67c774fd8afdd9effae38f90ee41024 + uses: agglayer/e2e/.github/workflows/aggkit-e2e-multi-chains.yml@e6a82374d52253099b92db12dcb86c6d92f4cfe1 secrets: inherit with: kurtosis-cdk-ref: ${{ 
needs.get-kurtosis-cdk-commit.outputs.kurtosis-commit }} - agglayer-e2e-ref: ce7f75e8d67c774fd8afdd9effae38f90ee41024 + agglayer-e2e-ref: e6a82374d52253099b92db12dcb86c6d92f4cfe1 kurtosis-cdk-enclave-name: op aggsender-find-imported-bridge-artifact: aggsender_find_imported_bridge kurtosis-cdk-args-1: ${{ needs.read-aggkit-args.outputs.kurtosis-cdk-args-1 }} @@ -284,11 +284,11 @@ jobs: - build-tools - read-aggkit-args - get-kurtosis-cdk-commit - uses: agglayer/e2e/.github/workflows/aggkit-e2e-multi-chains.yml@ce7f75e8d67c774fd8afdd9effae38f90ee41024 + uses: agglayer/e2e/.github/workflows/aggkit-e2e-multi-chains.yml@e6a82374d52253099b92db12dcb86c6d92f4cfe1 secrets: inherit with: kurtosis-cdk-ref: ${{ needs.get-kurtosis-cdk-commit.outputs.kurtosis-commit }} - agglayer-e2e-ref: ce7f75e8d67c774fd8afdd9effae38f90ee41024 + agglayer-e2e-ref: e6a82374d52253099b92db12dcb86c6d92f4cfe1 kurtosis-cdk-enclave-name: aggkit aggsender-find-imported-bridge-artifact: aggsender_find_imported_bridge kurtosis-cdk-args-1: ${{ needs.read-aggkit-args.outputs.kurtosis-cdk-args-3 }} diff --git a/test/run-local-e2e.sh b/test/run-local-e2e.sh index 52142fb3d..04834f2da 100755 --- a/test/run-local-e2e.sh +++ b/test/run-local-e2e.sh @@ -406,7 +406,7 @@ if [ "$E2E_REPO_PATH" != "-" ]; then ;; single-l2-network-op-pessimistic) # bats ./tests/aggkit/bridge-e2e.bats || exit 1 - bats ./tests/aggkit/bridge-sovereign-chain-e2e.bats -f "Test backwardLET with reorg scenarios" || exit 1 + bats ./tests/aggkit/bridge-sovereign-chain-e2e.bats -f "Test forwardLET with reorg scenarios" || exit 1 # bats ./tests/op/optimistic-mode.bats || exit 1 # bats ./tests/aggkit/bridge-e2e-nightly.bats || exit 1 # bats ./tests/aggkit/internal-claims.bats || exit 1 From bd49e83b7dbc488834881d9e0cbfd589419bd9b6 Mon Sep 17 00:00:00 2001 From: Arpit Temani Date: Thu, 15 Jan 2026 18:26:29 +0530 Subject: [PATCH 73/73] update ref --- .github/workflows/test-e2e.yml | 20 ++++++++++---------- test/run-local-e2e.sh | 14 +++++++------- 
2 files changed, 17 insertions(+), 17 deletions(-) diff --git a/.github/workflows/test-e2e.yml b/.github/workflows/test-e2e.yml index 89bc9f723..9c1675f66 100644 --- a/.github/workflows/test-e2e.yml +++ b/.github/workflows/test-e2e.yml @@ -173,11 +173,11 @@ jobs: - build-tools - read-aggkit-args - get-kurtosis-cdk-commit - uses: agglayer/e2e/.github/workflows/aggkit-e2e-single-chain.yml@e6a82374d52253099b92db12dcb86c6d92f4cfe1 + uses: agglayer/e2e/.github/workflows/aggkit-e2e-single-chain.yml@55457421ba75b421cc611022db81bbeefd3e1f60 secrets: inherit with: kurtosis-cdk-ref: ${{ needs.get-kurtosis-cdk-commit.outputs.kurtosis-commit }} - agglayer-e2e-ref: e6a82374d52253099b92db12dcb86c6d92f4cfe1 + agglayer-e2e-ref: 55457421ba75b421cc611022db81bbeefd3e1f60 kurtosis-cdk-enclave-name: op kurtosis-cdk-args: ${{ needs.read-aggkit-args.outputs.kurtosis-cdk-args-single-op-pessimistic }} test-name: "test-single-l2-network-op-pessimistic" @@ -201,11 +201,11 @@ jobs: - build-tools - read-aggkit-args - get-kurtosis-cdk-commit - uses: agglayer/e2e/.github/workflows/aggkit-e2e-single-chain.yml@e6a82374d52253099b92db12dcb86c6d92f4cfe1 + uses: agglayer/e2e/.github/workflows/aggkit-e2e-single-chain.yml@55457421ba75b421cc611022db81bbeefd3e1f60 secrets: inherit with: kurtosis-cdk-ref: ${{ needs.get-kurtosis-cdk-commit.outputs.kurtosis-commit }} - agglayer-e2e-ref: e6a82374d52253099b92db12dcb86c6d92f4cfe1 + agglayer-e2e-ref: 55457421ba75b421cc611022db81bbeefd3e1f60 kurtosis-cdk-enclave-name: op kurtosis-cdk-args: ${{ needs.read-aggkit-args.outputs.kurtosis-cdk-args-single-op-succinct }} aggsender-find-imported-bridge-artifact: aggsender_find_imported_bridge @@ -229,12 +229,12 @@ jobs: - build-tools - read-aggkit-args - get-kurtosis-cdk-commit - uses: agglayer/e2e/.github/workflows/aggkit-e2e-single-chain.yml@e6a82374d52253099b92db12dcb86c6d92f4cfe1 + uses: agglayer/e2e/.github/workflows/aggkit-e2e-single-chain.yml@55457421ba75b421cc611022db81bbeefd3e1f60 if: always() && 
github.event_name == 'schedule' && github.ref == 'refs/heads/develop' secrets: inherit with: kurtosis-cdk-ref: ${{ needs.get-kurtosis-cdk-commit.outputs.kurtosis-commit }} - agglayer-e2e-ref: e6a82374d52253099b92db12dcb86c6d92f4cfe1 + agglayer-e2e-ref: 55457421ba75b421cc611022db81bbeefd3e1f60 kurtosis-cdk-enclave-name: op kurtosis-cdk-args: ${{ needs.read-aggkit-args.outputs.kurtosis-cdk-args-single-op-succinct-aggoracle-committee }} test-name: "test-single-l2-network-op-succinct-aggoracle-committee" @@ -258,11 +258,11 @@ jobs: - build-tools - read-aggkit-args - get-kurtosis-cdk-commit - uses: agglayer/e2e/.github/workflows/aggkit-e2e-multi-chains.yml@e6a82374d52253099b92db12dcb86c6d92f4cfe1 + uses: agglayer/e2e/.github/workflows/aggkit-e2e-multi-chains.yml@55457421ba75b421cc611022db81bbeefd3e1f60 secrets: inherit with: kurtosis-cdk-ref: ${{ needs.get-kurtosis-cdk-commit.outputs.kurtosis-commit }} - agglayer-e2e-ref: e6a82374d52253099b92db12dcb86c6d92f4cfe1 + agglayer-e2e-ref: 55457421ba75b421cc611022db81bbeefd3e1f60 kurtosis-cdk-enclave-name: op aggsender-find-imported-bridge-artifact: aggsender_find_imported_bridge kurtosis-cdk-args-1: ${{ needs.read-aggkit-args.outputs.kurtosis-cdk-args-1 }} @@ -284,11 +284,11 @@ jobs: - build-tools - read-aggkit-args - get-kurtosis-cdk-commit - uses: agglayer/e2e/.github/workflows/aggkit-e2e-multi-chains.yml@e6a82374d52253099b92db12dcb86c6d92f4cfe1 + uses: agglayer/e2e/.github/workflows/aggkit-e2e-multi-chains.yml@55457421ba75b421cc611022db81bbeefd3e1f60 secrets: inherit with: kurtosis-cdk-ref: ${{ needs.get-kurtosis-cdk-commit.outputs.kurtosis-commit }} - agglayer-e2e-ref: e6a82374d52253099b92db12dcb86c6d92f4cfe1 + agglayer-e2e-ref: 55457421ba75b421cc611022db81bbeefd3e1f60 kurtosis-cdk-enclave-name: aggkit aggsender-find-imported-bridge-artifact: aggsender_find_imported_bridge kurtosis-cdk-args-1: ${{ needs.read-aggkit-args.outputs.kurtosis-cdk-args-3 }} diff --git a/test/run-local-e2e.sh b/test/run-local-e2e.sh index 
04834f2da..a8ac71135 100755 --- a/test/run-local-e2e.sh +++ b/test/run-local-e2e.sh @@ -405,13 +405,13 @@ if [ "$E2E_REPO_PATH" != "-" ]; then bats ./tests/aggkit/bridge-e2e-aggoracle-committee.bats || exit 1 ;; single-l2-network-op-pessimistic) - # bats ./tests/aggkit/bridge-e2e.bats || exit 1 - bats ./tests/aggkit/bridge-sovereign-chain-e2e.bats -f "Test forwardLET with reorg scenarios" || exit 1 - # bats ./tests/op/optimistic-mode.bats || exit 1 - # bats ./tests/aggkit/bridge-e2e-nightly.bats || exit 1 - # bats ./tests/aggkit/internal-claims.bats || exit 1 - # bats ./tests/aggkit/claim-reetrancy.bats || exit 1 - # bats ./tests/aggkit/aggsender-committee-updates.bats || exit 1 + bats ./tests/aggkit/bridge-e2e.bats || exit 1 + bats ./tests/aggkit/bridge-sovereign-chain-e2e.bats || exit 1 + bats ./tests/op/optimistic-mode.bats || exit 1 + bats ./tests/aggkit/bridge-e2e-nightly.bats || exit 1 + bats ./tests/aggkit/internal-claims.bats || exit 1 + bats ./tests/aggkit/claim-reetrancy.bats || exit 1 + bats ./tests/aggkit/aggsender-committee-updates.bats || exit 1 ;; multi-l2-networks-2-chains-op-pessimistic) bats ./tests/aggkit/bridge-e2e-2-chains.bats || exit 1