Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
44 commits
Select commit Hold shift + click to select a range
0899b2a
tob filtering if no rob
NateAtNodeKit Feb 14, 2025
abe68bd
debugging tests
rtavarezz Feb 14, 2025
2a19ef6
updates
NateAtNodeKit Feb 14, 2025
82c8003
debugging failing test
rtavarezz Feb 16, 2025
67cdff0
update
NateAtNodeKit Feb 17, 2025
bf17be0
base nonce fixed but fails at SimBlock call
rtavarezz Feb 17, 2025
244cd56
quick nonce fix & checkpoint for debugging
rtavarezz Feb 17, 2025
d45d167
progress, still debugging tests. len txs mismatch.
rtavarezz Feb 18, 2025
aced420
add txs hash stability test
NateAtNodeKit Feb 18, 2025
40496b2
tob base case passes! added rob submission for tob
rtavarezz Feb 18, 2025
b9b3a7e
added tob bundle filtering logic
rtavarezz Feb 18, 2025
0ba930d
update tob filtering
NateAtNodeKit Feb 19, 2025
7975993
tob filtering updates
NateAtNodeKit Feb 19, 2025
4b98643
tobnonce test wip
NateAtNodeKit Feb 19, 2025
17f5908
debugging failing tests checkpoint
rtavarezz Feb 20, 2025
fa1eabb
tobstate update
NateAtNodeKit Feb 20, 2025
1894835
fix TestToBNonceState
bianyuanop Feb 20, 2025
ff510e4
adding new test logic to failing test cases
rtavarezz Feb 20, 2025
a22af1e
overall basic flow updates
NateAtNodeKit Feb 20, 2025
69471bb
fix overall test
NateAtNodeKit Feb 21, 2025
3156f38
fix null payload resp case in mockdb
NateAtNodeKit Feb 21, 2025
7523d1c
update
NateAtNodeKit Feb 21, 2025
20890b4
update
NateAtNodeKit Feb 21, 2025
4924c12
tx simulation allowed revert
NateAtNodeKit Feb 21, 2025
ec5772d
working on submit new block req test cases
rtavarezz Feb 21, 2025
3df5c24
fix incoming RoB conflicts with previous ToB, disallowed simulation r…
bianyuanop Feb 21, 2025
1816783
debugging last 2 failing cases, checkpoint.
rtavarezz Feb 22, 2025
39db9aa
updates
NateAtNodeKit Feb 24, 2025
d1a93d1
update
rtavarezz Feb 24, 2025
4d8719d
feedback comment
rtavarezz Feb 24, 2025
85a6bd6
failing test resolved
rtavarezz Feb 25, 2025
5768624
test tob nonce update
rtavarezz Feb 25, 2025
3864778
lint fix
bianyuanop Feb 28, 2025
573f4ef
merge test-clean
bianyuanop Feb 28, 2025
e7ddcfe
resolve grammer issue
bianyuanop Feb 28, 2025
1bc7a0b
just tob fix
NateAtNodeKit Feb 28, 2025
b314d64
fixes for recv preconf tests
NateAtNodeKit Feb 28, 2025
4f9210b
fix chunk mgr config
NateAtNodeKit Feb 28, 2025
a8775c3
add actschan mock
NateAtNodeKit Feb 28, 2025
2768919
quick TestToBNonceState fix
rtavarezz Feb 28, 2025
98b4be3
fix overall basic flow
NateAtNodeKit Mar 1, 2025
7c8b1d9
fix overall flow test
NateAtNodeKit Mar 1, 2025
d9ae1c0
fix payload tests
NateAtNodeKit Mar 1, 2025
a92e856
Merge pull request #71 from AnomalyFi/test-clean-merge
bianyuanop Mar 3, 2025
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
6 changes: 5 additions & 1 deletion chunk_manager/chunk_manager.go
Original file line number Diff line number Diff line change
Expand Up @@ -97,10 +97,14 @@ func NewChunkManager(cfg *ChunkManagerConfig) (*ChunkManager, error) {
return nil, errors.New("provided state tob nonce greater than highest settled tob nonce")
}

if cfg.ExpirationTime < 0 || cfg.GCInterval < 0 || cfg.LayerSubmissionCheckInterval < 0 {
if cfg.ExpirationTime < 0 || cfg.GCInterval < 0 || cfg.LayerSubmissionCheckInterval <= 0 {
return nil, fmt.Errorf("invalid duration variables, ExpirationTime: %s, GCInterval: %s, LayerSubmissionCheckInterval: %s", cfg.ExpirationTime, cfg.GCInterval, cfg.LayerSubmissionCheckInterval)
}

if cfg.SEQClient == nil {
return nil, errors.New("SEQClient is required")
}

// when cfg.StateLowestToBNonce =0, after pushing the first element to both PQs, we add a place holder layer as the first layer
// the tob nonce of which is headTobNonce+1
// when cfg.StateLowestToBNonce >0, chunks will be replayed with the base headToBNonce to be cfg.StateLowestToBNonce-1, hence guarantees
Expand Down
43 changes: 42 additions & 1 deletion common/test_utils.go
Original file line number Diff line number Diff line change
Expand Up @@ -250,12 +250,45 @@ func DisplayEthTxs(txs map[string]ethtypes.Transactions) {
for domain, domainTxs := range txs {
fmt.Printf("domain: %s\n", domain)
for _, tx := range domainTxs {
fmt.Printf("tx: %s\n", tx.Hash().Hex())
sender, err := ExtractSender(tx)
if err != nil {
panic(err)
}
fmt.Printf("sender: %s tx: %s:%d\n", sender, tx.Hash().Hex(), tx.Nonce())
}
}
fmt.Printf("========txs info end=======\n")
}

// FindTxHash reports whether the hash of txLhs appears among txs.
// It panics if txLhs is nil (programmer error in the test helper).
func FindTxHash(txLhs *ethtypes.Transaction, txs []*ethtypes.Transaction) bool {
	if txLhs == nil {
		panic("txLhs is nil")
	}
	target := txLhs.Hash()
	for _, candidate := range txs {
		if candidate.Hash() == target {
			return true
		}
	}
	return false
}

// TxsHashUnorderedMatch reports whether txsLhs and txsRhs contain the same
// transaction hashes, ignoring order. Lengths must match exactly. NOTE(review):
// duplicates are not counted — a hash present once in txsRhs satisfies any
// number of occurrences in txsLhs; confirm this is acceptable for the tests
// using it.
func TxsHashUnorderedMatch(txsLhs []*ethtypes.Transaction, txsRhs []*ethtypes.Transaction) bool {
	if len(txsLhs) != len(txsRhs) {
		return false
	}

	for _, tx := range txsLhs {
		if !FindTxHash(tx, txsRhs) {
			// use an explicit format verb and newline: the original passed the
			// hex hash concatenated into the Printf format string (go vet
			// printf violation — a '%' in the data would corrupt the output)
			fmt.Printf("could not find tx hash %s\n", tx.Hash().Hex())
			return false
		}
	}

	return true
}

func SyncMapLen(m *sync.Map) int {
var length int
m.Range(func(_, _ interface{}) bool {
Expand All @@ -264,3 +297,11 @@ func SyncMapLen(m *sync.Map) int {
})
return length
}

// CollectChunksFromRequests extracts the chunk carried by each
// submit-new-block request, preserving request order.
func CollectChunksFromRequests(reqs ...*SubmitNewBlockRequest) []*ArcadiaChunk {
	collected := make([]*ArcadiaChunk, 0, len(reqs))
	for _, request := range reqs {
		collected = append(collected, &request.Chunk)
	}
	return collected
}
74 changes: 68 additions & 6 deletions common/types.go
Original file line number Diff line number Diff line change
Expand Up @@ -13,6 +13,7 @@ import (
"time"

srpc "github.com/AnomalyFi/nodekit-seq/rpc"
"github.com/rollkit/go-da"

hactions "github.com/AnomalyFi/hypersdk/actions"
"github.com/gorilla/websocket"
Expand Down Expand Up @@ -205,10 +206,12 @@ type ToBChunk struct {
txHash2BundleHash map[string]string
revertingTxHashes map[string]struct{}

// refers to whether bundle at idx has been removed
removedBitSet *bitset.BitSet
domains map[string]struct{} // chain ids or rollup ids
seqTxs []*chain.Transaction
initialized bool

domains map[string]struct{} // chain ids or rollup ids
seqTxs []*chain.Transaction
initialized bool

l sync.RWMutex
}
Expand All @@ -229,6 +232,17 @@ func (tob *ToBChunk) GetBundles() []*CrossRollupBundle {
return ret
}

// IsBundleIdxFiltered reports whether the bundle at idx has been marked as
// removed (filtered) from this ToB chunk. Out-of-range indices — negative or
// past the end of Bundles — report false.
func (tob *ToBChunk) IsBundleIdxFiltered(idx int) bool {
	tob.l.RLock()
	defer tob.l.RUnlock()

	// guard negative idx too: the original only checked the upper bound, so a
	// negative idx was converted by uint(idx) into a huge bitset probe
	if idx < 0 || idx >= len(tob.Bundles) {
		return false
	}

	return tob.removedBitSet.Test(uint(idx))
}

func (tob *ToBChunk) GetTxs() map[string]ethtypes.Transactions {
tob.l.RLock()
defer tob.l.RUnlock()
Expand Down Expand Up @@ -344,10 +358,45 @@ func (tob *ToBChunk) removeBundleContainTx(txHash string) (*CrossRollupBundle, e
bundleIdx2remove := slices.IndexFunc(tob.Bundles, func(crb *CrossRollupBundle) bool {
return crb.BundleHash == bundleHash
})

// mark as removed
tob.removedBitSet = tob.removedBitSet.Set(uint(bundleIdx2remove))

// re-populate [tob.txs]
tob.repopulateToBTxs()

return tob.Bundles[bundleIdx2remove], nil
}

// FilterBundleWithHash marks the bundle with the given hash as removed,
// re-populates the chunk's per-domain tx map, and returns the filtered bundle.
// It returns an error when no bundle carries bundleHash.
func (tob *ToBChunk) FilterBundleWithHash(bundleHash string) (*CrossRollupBundle, error) {
	tob.l.Lock()
	defer tob.l.Unlock()

	// locate by hash via slices.IndexFunc, consistent with removeBundleContainTx;
	// this also removes the hand-rolled search whose foundIdx zero value relied
	// on the nil check below for safety
	foundIdx := slices.IndexFunc(tob.Bundles, func(crb *CrossRollupBundle) bool {
		return crb.BundleHash == bundleHash
	})
	if foundIdx < 0 {
		return nil, fmt.Errorf("filterBundleWithHash found no bundle hash [%s]", bundleHash)
	}

	// mark as removed
	tob.removedBitSet = tob.removedBitSet.Set(uint(foundIdx))

	// re-populate [tob.txs]
	tob.repopulateToBTxs()

	return tob.Bundles[foundIdx], nil
}

func (tob *ToBChunk) repopulateToBTxs() {
tob.txs = make(map[string]ethtypes.Transactions)
for bundleIdx, bundle := range tob.Bundles {
// continue as this bundle was removed
Expand All @@ -360,13 +409,12 @@ func (tob *ToBChunk) removeBundleContainTx(txHash string) (*CrossRollupBundle, e
tob.txs[domain] = l
}
}

// track domains that contain txs in it
tob.domains = make(map[string]struct{})
for domain := range tob.txs {
tob.domains[domain] = struct{}{}
}

return tob.Bundles[bundleIdx2remove], nil
}

// LowestBlockNumber return the tracked lowest heights for domains, this prevents the situation that
Expand Down Expand Up @@ -527,7 +575,7 @@ type RoBChunk struct {
BlockNumber uint64 `json:"block_number"`

// following fields will be populated after initialization
removedBitSet *bitset.BitSet
removedBitSet *bitset.BitSet // refers to whether tx within txs at idx has been removed
initialized bool
txs ethtypes.Transactions
seqTxs []*chain.Transaction
Expand Down Expand Up @@ -1541,6 +1589,10 @@ func (b *CrossRollupBundle) Domains() []string {
return maps.Keys(b.txs)
}

// HasTxs reports whether the bundle carries transactions for at least one domain.
func (b *CrossRollupBundle) HasTxs() bool {
	return len(b.txs) != 0
}

func (b *CrossRollupBundle) ContainTx(txHash string) bool {
for _, txs := range b.txs {
contain := slices.ContainsFunc(txs, func(t *ethtypes.Transaction) bool {
Expand Down Expand Up @@ -1645,3 +1697,13 @@ type CertInfo struct {
Cert []byte
PlaceHolder bool
}

// DACertificate is an opaque certificate returned by the data availability
// layer for a submitted blob.
type DACertificate = []byte

// DAPayload is a raw blob payload submitted to the data availability layer.
type DAPayload = []byte

// BlobInfo describes a blob submitted to the DA layer. Populated by
// DASubmitter.SubmitAndFinalizeBlob.
type BlobInfo struct {
// certificates identifying the submitted blob(s)
BlobID []DACertificate
// inclusion proofs for the blob(s), as returned by the DA client
Proof []da.Proof
// whether the submission has been observed as finalized
IsFinalized bool
}
11 changes: 6 additions & 5 deletions database/mockdb.go
Original file line number Diff line number Diff line change
Expand Up @@ -29,7 +29,7 @@ type MockDB struct {
RoBChunkMap map[string]map[uint64]*common.RoBChunkDB
RoBChunkAcceptedMap map[string]*common.RoBChunkAcceptedDB
LastFetchedBlockNum common.LastFetchedBlockNumberDB
PayloadResp common.PayloadDB
PayloadResp *common.PayloadDB
Epoch uint64
EpochLowestToBNonce map[uint64]uint64
PayloadTxsToB map[string]*common.PayloadTxs
Expand All @@ -54,7 +54,6 @@ func NewMockDB() *MockDB {
RoBChunkMap: make(map[string]map[uint64]*common.RoBChunkDB),
RoBChunkAcceptedMap: make(map[string]*common.RoBChunkAcceptedDB),
LastFetchedBlockNum: common.LastFetchedBlockNumberDB{},
PayloadResp: common.PayloadDB{},
Epoch: 0,
EpochLowestToBNonce: make(map[uint64]uint64),
PayloadTxsToB: make(map[string]*common.PayloadTxs),
Expand Down Expand Up @@ -247,8 +246,10 @@ func (db *MockDB) RemoveBestAuctionBid(epoch uint64) error {
func (db *MockDB) GetPayloadResp(chainID string, blockNumber uint64) (*common.PayloadDB, error) {
db.l.Lock()
defer db.l.Unlock()

return &db.PayloadResp, nil
if db.PayloadResp == nil {
return nil, nil
}
return db.PayloadResp, nil
}

func (db *MockDB) SetPayloadResp(chainID string, blockNumber uint64, txs *common.GetPayloadResponse) error {
Expand All @@ -260,7 +261,7 @@ func (db *MockDB) SetPayloadResp(chainID string, blockNumber uint64, txs *common
return err
}

db.PayloadResp = *payloadTxs
db.PayloadResp = payloadTxs
return nil
}

Expand Down
21 changes: 6 additions & 15 deletions datalayer/da_submitter.go
Original file line number Diff line number Diff line change
Expand Up @@ -21,6 +21,7 @@ const (

type IDASubmitter interface {
ChunksChan() chan *common.ArcadiaToSEQChunkMessage
SubmitAndFinalizeBlob(ctx context.Context, payload common.DAPayload) (*common.BlobInfo, error)
}

type DASubmitterOpts struct {
Expand All @@ -34,16 +35,6 @@ type DASubmitterOpts struct {
Mocking bool
}

type DACertificate = []byte

type DAPayload = []byte

type BlobInfo struct {
BlobID []DACertificate
Proof []da.Proof
IsFinalized bool
}

var _ IDASubmitter = (*DASubmitter)(nil)

// CelestiaConfig TODO: check fields below
Expand Down Expand Up @@ -120,7 +111,7 @@ func NewDASubmitter(opts DASubmitterOpts) (*DASubmitter, error) {
chunkCancel()
continue
}
cert, err := submitter.submitAndFinalizeBlob(chunkCtx, blob)
cert, err := submitter.SubmitAndFinalizeBlob(chunkCtx, blob)
if err != nil {
log.WithError(err).Error("unable to submit blob to DA")
chunkCancel()
Expand Down Expand Up @@ -152,14 +143,14 @@ func NewDASubmitter(opts DASubmitterOpts) (*DASubmitter, error) {
return submitter, nil
}

// TODO: submission and finalization check should be separated in the future
// SubmitAndFinalizeBlob Submit does submission of the given payload to the data availability layer.
// It will retry maxRetries number of times with increasing backoff until it succeeds.
// On success, returns the DA certificate as []byte.
// On failure, returns nil byte slice and error
func (da *DASubmitter) submitAndFinalizeBlob(ctx context.Context, payload DAPayload) (*BlobInfo, error) {
// TODO: submission and finalization check should be separated in the future
func (da *DASubmitter) SubmitAndFinalizeBlob(ctx context.Context, payload common.DAPayload) (*common.BlobInfo, error) {
var numRetries int
var certificateDA DACertificate
var certificateDA common.DACertificate

for {
numRetries++
Expand Down Expand Up @@ -203,7 +194,7 @@ func (da *DASubmitter) submitAndFinalizeBlob(ctx context.Context, payload DAPayl
}
da.log.Infof("Retrieved proofs for blobs: %v", proofs)

cert := &BlobInfo{
cert := &common.BlobInfo{
BlobID: [][]byte{certificateDA},
Proof: proofs,
IsFinalized: true,
Expand Down
2 changes: 1 addition & 1 deletion datalayer/da_submitter_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -62,7 +62,7 @@ func TestDAClientCanPushCertToChunkManager(t *testing.T) {
mockDA.EXPECT().Submit(mock.Anything, [][]byte{blob}, celestiaConfig.GasPrice, celestiaConfig.NamespaceID).Return([][]byte{testBlobID}, nil)
mockDA.EXPECT().GetProofs(mock.Anything, [][]byte{testBlobID}, celestiaConfig.NamespaceID).Return([][]byte{testProof}, nil)

expectedCert := BlobInfo{
expectedCert := common.BlobInfo{
BlobID: [][]byte{testBlobID},
Proof: [][]byte{testProof},
IsFinalized: true,
Expand Down
61 changes: 61 additions & 0 deletions datalayer/mocks/mock_IDASubmitter.go

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

Loading