From f6053ab2896b894eb714b0de597fe12e4199c02a Mon Sep 17 00:00:00 2001 From: paschal533 Date: Sat, 11 Apr 2026 23:04:30 +0100 Subject: [PATCH 01/14] feat(common): add From for ErrorObjectOwned --- common/Cargo.toml | 1 + common/src/transaction.rs | 36 ++++++++++++++++++++++++++++++++++++ 2 files changed, 37 insertions(+) diff --git a/common/Cargo.toml b/common/Cargo.toml index dbf5ec0c9..c67db28fe 100644 --- a/common/Cargo.toml +++ b/common/Cargo.toml @@ -22,3 +22,4 @@ log.workspace = true hex.workspace = true borsh.workspace = true logos-blockchain-common-http-client.workspace = true +jsonrpsee = { workspace = true } diff --git a/common/src/transaction.rs b/common/src/transaction.rs index 7ce0e76fb..ee7d54d28 100644 --- a/common/src/transaction.rs +++ b/common/src/transaction.rs @@ -1,4 +1,5 @@ use borsh::{BorshDeserialize, BorshSerialize}; +use jsonrpsee::types::{ErrorCode, ErrorObjectOwned}; use log::warn; use nssa::{AccountId, V03State, ValidatedStateDiff}; use nssa_core::{BlockId, Timestamp}; @@ -154,6 +155,12 @@ pub enum TransactionMalformationError { TransactionTooLarge { size: usize, max: usize }, } +impl From for ErrorObjectOwned { + fn from(err: TransactionMalformationError) -> Self { + ErrorObjectOwned::owned(ErrorCode::InvalidParams.code(), err.to_string(), None::<()>) + } +} + /// Returns the canonical Clock Program invocation transaction for the given block timestamp. /// Every valid block must end with exactly one occurrence of this transaction. 
#[must_use] @@ -170,3 +177,32 @@ pub fn clock_invocation(timestamp: clock_core::Instruction) -> nssa::PublicTrans nssa::public_transaction::WitnessSet::from_raw_parts(vec![]), ) } + +#[cfg(test)] +mod malformation_error_tests { + use jsonrpsee::types::ErrorCode; + + use super::*; + + #[test] + fn from_too_large_produces_invalid_params_code() { + let err = TransactionMalformationError::TransactionTooLarge { size: 100, max: 50 }; + let rpc_err: jsonrpsee::types::ErrorObjectOwned = err.into(); + assert_eq!(rpc_err.code(), ErrorCode::InvalidParams.code()); + assert!(rpc_err.message().contains("exceeds maximum")); + } + + #[test] + fn from_failed_decode_produces_invalid_params_code() { + let err = TransactionMalformationError::FailedToDecode { tx: crate::HashType([0; 32]) }; + let rpc_err: jsonrpsee::types::ErrorObjectOwned = err.into(); + assert_eq!(rpc_err.code(), ErrorCode::InvalidParams.code()); + } + + #[test] + fn from_invalid_signature_produces_invalid_params_code() { + let err = TransactionMalformationError::InvalidSignature; + let rpc_err: jsonrpsee::types::ErrorObjectOwned = err.into(); + assert_eq!(rpc_err.code(), ErrorCode::InvalidParams.code()); + } +} From 6d7f4814e4509b611090d7bbcb1df9f51461f3aa Mon Sep 17 00:00:00 2001 From: paschal533 Date: Sat, 11 Apr 2026 23:43:16 +0100 Subject: [PATCH 02/14] fix(sequencer): replace .expect() crash with typed TransactionMalformationError Replace panicking .expect() calls in send_transaction with proper error propagation using TransactionMalformationError variants. Unify the size error to use TransactionMalformationError::TransactionTooLarge instead of a raw format string. Add unit tests for too-large and valid transaction paths. 
--- sequencer/service/Cargo.toml | 6 ++ sequencer/service/src/service.rs | 115 +++++++++++++++++++++++++++---- 2 files changed, 106 insertions(+), 15 deletions(-) diff --git a/sequencer/service/Cargo.toml b/sequencer/service/Cargo.toml index 6fee808cb..bf9b84455 100644 --- a/sequencer/service/Cargo.toml +++ b/sequencer/service/Cargo.toml @@ -27,6 +27,12 @@ futures.workspace = true bytesize.workspace = true borsh.workspace = true +[dev-dependencies] +sequencer_core = { workspace = true, features = ["mock"] } +bedrock_client.workspace = true +logos-blockchain-core.workspace = true +tempfile.workspace = true + [features] default = [] # Runs the sequencer in standalone mode without depending on Bedrock and Indexer services. diff --git a/sequencer/service/src/service.rs b/sequencer/service/src/service.rs index 716453631..b1574fdc2 100644 --- a/sequencer/service/src/service.rs +++ b/sequencer/service/src/service.rs @@ -1,6 +1,6 @@ use std::{collections::BTreeMap, sync::Arc}; -use common::transaction::NSSATransaction; +use common::transaction::{NSSATransaction, TransactionMalformationError}; use jsonrpsee::{ core::async_trait, types::{ErrorCode, ErrorObjectOwned}, @@ -49,30 +49,26 @@ impl max_tx_size { - return Err(ErrorObjectOwned::owned( - ErrorCode::InvalidParams.code(), - format!("Transaction too large: size {tx_size}, max {max_tx_size}"), - None::<()>, + return Err(ErrorObjectOwned::from( + TransactionMalformationError::TransactionTooLarge { + size: encoded_tx.len(), + max: usize::try_from(max_tx_size).unwrap_or(usize::MAX), + }, )); } let authenticated_tx = tx .transaction_stateless_check() .inspect_err(|err| warn!("Error at pre_check {err:#?}")) - .map_err(|err| { - ErrorObjectOwned::owned( - ErrorCode::InvalidParams.code(), - format!("{err:?}"), - None::<()>, - ) - })?; + .map_err(ErrorObjectOwned::from)?; self.mempool_handle .push(authenticated_tx) @@ -181,3 +177,92 @@ impl ErrorObjectOwned { ErrorObjectOwned::owned(ErrorCode::InternalError.code(), 
err.to_string(), None::<()>) } + +#[cfg(test)] +mod tests { + #![expect(clippy::shadow_unrelated, reason = "We don't care about it in tests")] + + use std::{sync::Arc, time::Duration}; + + use bedrock_client::BackoffConfig; + use common::test_utils::sequencer_sign_key_for_testing; + use jsonrpsee::types::ErrorCode; + use logos_blockchain_core::mantle::ops::channel::ChannelId; + use sequencer_core::{ + config::{BedrockConfig, SequencerConfig}, + mock::SequencerCoreWithMockClients, + }; + use tokio::sync::Mutex; + + use super::*; + + fn test_config() -> SequencerConfig { + let tempdir = tempfile::tempdir().unwrap(); + SequencerConfig { + home: tempdir.into_path(), + genesis_id: 1, + is_genesis_random: false, + max_num_tx_in_block: 10, + max_block_size: bytesize::ByteSize::b(512), + mempool_max_size: 10000, + block_create_timeout: Duration::from_secs(1), + signing_key: *sequencer_sign_key_for_testing().value(), + bedrock_config: BedrockConfig { + backoff: BackoffConfig { + start_delay: Duration::from_millis(100), + max_retries: 5, + }, + channel_id: ChannelId::from([0; 32]), + node_url: "http://not-used-in-tests".parse().unwrap(), + auth: None, + }, + retry_pending_blocks_timeout: Duration::from_mins(4), + indexer_rpc_url: "ws://localhost:8779".parse().unwrap(), + initial_public_accounts: None, + initial_private_accounts: None, + } + } + + async fn make_service( + ) -> SequencerService< + sequencer_core::mock::MockBlockSettlementClient, + sequencer_core::mock::MockIndexerClient, + > { + let config = test_config(); + let (core, mempool_handle) = + SequencerCoreWithMockClients::start_from_config(config).await; + let arc_core = Arc::new(Mutex::new(core)); + SequencerService::new(arc_core, mempool_handle, 512) + } + + #[tokio::test] + async fn send_transaction_too_large_returns_invalid_params() { + use sequencer_service_rpc::RpcServer as _; + let tiny_service = { + let config2 = SequencerConfig { + max_block_size: bytesize::ByteSize::b(1), + ..test_config() + }; + let 
(core2, mh2) = + SequencerCoreWithMockClients::start_from_config(config2).await; + SequencerService::new(Arc::new(Mutex::new(core2)), mh2, 1) + }; + let tx = common::test_utils::produce_dummy_empty_transaction(); + let result = tiny_service.send_transaction(tx).await; + assert!(result.is_err()); + let err = result.unwrap_err(); + assert_eq!(err.code(), ErrorCode::InvalidParams.code()); + assert!(err.message().contains("too large") || err.message().contains("exceeds maximum")); + } + + #[tokio::test] + async fn send_valid_transaction_returns_hash() { + use sequencer_service_rpc::RpcServer as _; + let service = make_service().await; + let tx = common::test_utils::produce_dummy_empty_transaction(); + let expected_hash = tx.hash(); + let result = service.send_transaction(tx).await; + assert!(result.is_ok()); + assert_eq!(result.unwrap(), expected_hash); + } +} From d2e71c8334481cb3b677c9fb98bdfd0cb9e359b1 Mon Sep 17 00:00:00 2001 From: paschal533 Date: Sat, 11 Apr 2026 23:49:23 +0100 Subject: [PATCH 03/14] feat(common): add TxStatus, TxReceipt, RejectedTxRecord types --- common/Cargo.toml | 3 +++ common/src/lib.rs | 1 + common/src/receipt.rs | 53 +++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 57 insertions(+) create mode 100644 common/src/receipt.rs diff --git a/common/Cargo.toml b/common/Cargo.toml index c67db28fe..41f8b3958 100644 --- a/common/Cargo.toml +++ b/common/Cargo.toml @@ -23,3 +23,6 @@ hex.workspace = true borsh.workspace = true logos-blockchain-common-http-client.workspace = true jsonrpsee = { workspace = true } + +[dev-dependencies] +serde_json.workspace = true diff --git a/common/src/lib.rs b/common/src/lib.rs index a7744d63d..6c90b36a1 100644 --- a/common/src/lib.rs +++ b/common/src/lib.rs @@ -7,6 +7,7 @@ pub mod block; mod borsh_base64; pub mod config; pub mod transaction; +pub mod receipt; // Module for tests utility functions // TODO: Compile only for tests diff --git a/common/src/receipt.rs b/common/src/receipt.rs new file mode 100644 
index 000000000..73e655a74 --- /dev/null +++ b/common/src/receipt.rs @@ -0,0 +1,53 @@ +use borsh::{BorshDeserialize, BorshSerialize}; +use serde::{Deserialize, Serialize}; + +use crate::HashType; + +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, BorshSerialize, BorshDeserialize)] +pub enum TxStatus { + Pending, + Included { block_id: u64 }, + Rejected { reason: String }, + Unknown, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct TxReceipt { + pub tx_hash: HashType, + pub status: TxStatus, + pub timestamp_ms: Option, +} + +#[derive(Debug, Clone, BorshSerialize, BorshDeserialize)] +pub struct RejectedTxRecord { + pub reason: String, + pub timestamp_ms: u64, + pub block_height: u64, +} + +#[cfg(test)] +mod tests { + #[test] + fn rejected_tx_record_borsh_roundtrip() { + use super::RejectedTxRecord; + let record = RejectedTxRecord { + reason: "nonce mismatch".to_owned(), + timestamp_ms: 1_700_000_000_000, + block_height: 42, + }; + let encoded = borsh::to_vec(&record).unwrap(); + let decoded: RejectedTxRecord = borsh::from_slice(&encoded).unwrap(); + assert_eq!(record.reason, decoded.reason); + assert_eq!(record.timestamp_ms, decoded.timestamp_ms); + assert_eq!(record.block_height, decoded.block_height); + } + + #[test] + fn tx_status_serde_roundtrip() { + use super::TxStatus; + let status = TxStatus::Rejected { reason: "bad sig".to_owned() }; + let json = serde_json::to_string(&status).unwrap(); + let back: TxStatus = serde_json::from_str(&json).unwrap(); + assert!(matches!(back, TxStatus::Rejected { .. })); + } +} From 808ee082d1d733a60093b1ea50924077e7b411b8 Mon Sep 17 00:00:00 2001 From: paschal533 Date: Sat, 11 Apr 2026 23:51:26 +0100 Subject: [PATCH 04/14] feat(storage): add cf_rejected_tx column family with put/get methods Registers the new rejected-transaction column family in RocksDB, and exposes rejected_tx_column, put_rejected_tx, and get_rejected_tx on RocksDBIO for storing and retrieving RejectedTxRecord by hash. 
--- storage/src/sequencer/mod.rs | 50 +++++++++++++++++++++++++++++++++++- 1 file changed, 49 insertions(+), 1 deletion(-) diff --git a/storage/src/sequencer/mod.rs b/storage/src/sequencer/mod.rs index 508f6c29d..103fb8254 100644 --- a/storage/src/sequencer/mod.rs +++ b/storage/src/sequencer/mod.rs @@ -29,6 +29,9 @@ pub const DB_NSSA_STATE_KEY: &str = "nssa_state"; /// Name of state column family. pub const CF_NSSA_STATE_NAME: &str = "cf_nssa_state"; +/// Name of the rejected-transaction column family. +pub const CF_REJECTED_TX_NAME: &str = "cf_rejected_tx"; + pub struct RocksDBIO { pub db: DBWithThreadMode, } @@ -51,6 +54,7 @@ impl RocksDBIO { let cfb = ColumnFamilyDescriptor::new(CF_BLOCK_NAME, cf_opts.clone()); let cfmeta = ColumnFamilyDescriptor::new(CF_META_NAME, cf_opts.clone()); let cfstate = ColumnFamilyDescriptor::new(CF_NSSA_STATE_NAME, cf_opts.clone()); + let cfrejected = ColumnFamilyDescriptor::new(CF_REJECTED_TX_NAME, cf_opts.clone()); let mut db_opts = Options::default(); db_opts.create_missing_column_families(true); @@ -58,7 +62,7 @@ impl RocksDBIO { let db = DBWithThreadMode::::open_cf_descriptors( &db_opts, path, - vec![cfb, cfmeta, cfstate], + vec![cfb, cfmeta, cfstate, cfrejected], ) .map_err(|err| DbError::RocksDbError { error: err, @@ -119,6 +123,50 @@ impl RocksDBIO { .expect("State should exist") } + pub fn rejected_tx_column(&self) -> Arc> { + self.db + .cf_handle(CF_REJECTED_TX_NAME) + .expect("Rejected TX column should exist") + } + + pub fn put_rejected_tx( + &self, + hash: common::HashType, + record: &common::receipt::RejectedTxRecord, + ) -> crate::DbResult<()> { + let cf = self.rejected_tx_column(); + let value = borsh::to_vec(record).map_err(|err| { + crate::error::DbError::borsh_cast_message( + err, + Some("Failed to serialize RejectedTxRecord".to_owned()), + ) + })?; + self.db + .put_cf(&cf, hash.as_ref(), value) + .map_err(|err| crate::error::DbError::rocksdb_cast_message(err, None)) + } + + pub fn get_rejected_tx( + &self, + hash: 
common::HashType, + ) -> crate::DbResult> { + let cf = self.rejected_tx_column(); + let bytes = self + .db + .get_cf(&cf, hash.as_ref()) + .map_err(|err| crate::error::DbError::rocksdb_cast_message(err, None))?; + bytes + .map(|b| { + borsh::from_slice::(&b).map_err(|err| { + crate::error::DbError::borsh_cast_message( + err, + Some("Failed to deserialize RejectedTxRecord".to_owned()), + ) + }) + }) + .transpose() + } + // Meta pub fn get_meta_first_block_in_db(&self) -> DbResult { From 9b4e76fa510506f9267885742109b3c7ede37740 Mon Sep 17 00:00:00 2001 From: paschal533 Date: Sat, 11 Apr 2026 23:53:43 +0100 Subject: [PATCH 05/14] feat(sequencer-core): add store/get_rejected_tx and get_block_id_for_tx to SequencerStore --- sequencer/core/src/block_store.rs | 87 +++++++++++++++++++++++++++++++ 1 file changed, 87 insertions(+) diff --git a/sequencer/core/src/block_store.rs b/sequencer/core/src/block_store.rs index 7e47005df..832f60a2d 100644 --- a/sequencer/core/src/block_store.rs +++ b/sequencer/core/src/block_store.rs @@ -103,6 +103,27 @@ impl SequencerStore { pub fn get_nssa_state(&self) -> Option { self.dbio.get_nssa_state().ok() } + + pub fn store_rejected_tx( + &mut self, + hash: common::HashType, + reason: String, + block_height: u64, + timestamp_ms: u64, + ) -> anyhow::Result<()> { + use common::receipt::RejectedTxRecord; + let record = RejectedTxRecord { reason, timestamp_ms, block_height }; + Ok(self.dbio.put_rejected_tx(hash, &record)?) + } + + pub fn get_rejected_tx(&self, hash: common::HashType) -> Option { + self.dbio.get_rejected_tx(hash).ok().flatten() + } + + /// Returns the block_id that contains this transaction, or `None` if not yet included. 
+ pub fn get_block_id_for_tx(&self, hash: common::HashType) -> Option { + self.tx_hash_to_block_map.get(&hash).copied() + } } pub(crate) fn block_to_transactions_map(block: &Block) -> HashMap { @@ -264,4 +285,70 @@ mod tests { common::block::BedrockStatus::Finalized )); } + + #[test] + fn store_and_get_rejected_tx() { + let temp_dir = tempdir().unwrap(); + let signing_key = sequencer_sign_key_for_testing(); + let genesis_block_hashable_data = HashableBlockData { + block_id: 0, + prev_block_hash: HashType([0; 32]), + timestamp: 0, + transactions: vec![], + }; + let genesis_block = genesis_block_hashable_data.into_pending_block(&signing_key, [0; 32]); + let mut store = + SequencerStore::open_db_with_genesis(temp_dir.path(), &genesis_block, [0; 32], signing_key) + .unwrap(); + + let hash = HashType([42; 32]); + store + .store_rejected_tx(hash, "bad nonce".to_owned(), 1, 1_000_000) + .unwrap(); + + let record = store.get_rejected_tx(hash).unwrap(); + assert_eq!(record.reason, "bad nonce"); + assert_eq!(record.block_height, 1); + } + + #[test] + fn get_block_id_for_tx_returns_none_when_not_included() { + let temp_dir = tempdir().unwrap(); + let signing_key = sequencer_sign_key_for_testing(); + let genesis_block_hashable_data = HashableBlockData { + block_id: 0, + prev_block_hash: HashType([0; 32]), + timestamp: 0, + transactions: vec![], + }; + let genesis_block = genesis_block_hashable_data.into_pending_block(&signing_key, [0; 32]); + let store = + SequencerStore::open_db_with_genesis(temp_dir.path(), &genesis_block, [0; 32], signing_key) + .unwrap(); + + assert!(store.get_block_id_for_tx(HashType([1; 32])).is_none()); + } + + #[test] + fn get_block_id_for_tx_returns_block_id_after_inclusion() { + let temp_dir = tempdir().unwrap(); + let signing_key = sequencer_sign_key_for_testing(); + let genesis_block_hashable_data = HashableBlockData { + block_id: 0, + prev_block_hash: HashType([0; 32]), + timestamp: 0, + transactions: vec![], + }; + let genesis_block = 
genesis_block_hashable_data.into_pending_block(&signing_key, [0; 32]); + let mut store = + SequencerStore::open_db_with_genesis(temp_dir.path(), &genesis_block, [0; 32], signing_key) + .unwrap(); + + let tx = common::test_utils::produce_dummy_empty_transaction(); + let block = common::test_utils::produce_dummy_block(1, None, vec![tx.clone()]); + let dummy_state = nssa::V03State::new_with_genesis_accounts(&[], vec![], 0); + store.update(&block, [1; 32], &dummy_state).unwrap(); + + assert_eq!(store.get_block_id_for_tx(tx.hash()), Some(1)); + } } From 836427234dce0b05a9826fca02de0100662cf468 Mon Sep 17 00:00:00 2001 From: paschal533 Date: Sat, 11 Apr 2026 23:55:37 +0100 Subject: [PATCH 06/14] feat(sequencer-core): persist rejection records to RocksDB on block production failure --- sequencer/core/src/lib.rs | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/sequencer/core/src/lib.rs b/sequencer/core/src/lib.rs index cbf8e9105..263b6faf8 100644 --- a/sequencer/core/src/lib.rs +++ b/sequencer/core/src/lib.rs @@ -250,6 +250,14 @@ impl SequencerCore Date: Sat, 11 Apr 2026 23:58:51 +0100 Subject: [PATCH 07/14] feat(sequencer-service): add pending_txs tracking and get_transaction_receipt --- sequencer/service/src/service.rs | 73 +++++++++++++++++++++++++++++++- 1 file changed, 71 insertions(+), 2 deletions(-) diff --git a/sequencer/service/src/service.rs b/sequencer/service/src/service.rs index b1574fdc2..27d7d94d6 100644 --- a/sequencer/service/src/service.rs +++ b/sequencer/service/src/service.rs @@ -1,4 +1,4 @@ -use std::{collections::BTreeMap, sync::Arc}; +use std::{collections::{BTreeMap, HashSet}, sync::Arc}; use common::transaction::{NSSATransaction, TransactionMalformationError}; use jsonrpsee::{ @@ -23,10 +23,11 @@ pub struct SequencerService>>, mempool_handle: MemPoolHandle, max_block_size: u64, + pending_txs: Arc>>, } impl SequencerService { - pub const fn new( + pub fn new( sequencer: Arc>>, mempool_handle: MemPoolHandle, max_block_size: u64, @@ -35,6 
+36,7 @@ impl SequencerService Result { + use common::receipt::{TxReceipt, TxStatus}; + + let sequencer = self.sequencer.lock().await; + + // 1. Rejected store: durable, survives restarts. + if let Some(record) = sequencer.block_store().get_rejected_tx(tx_hash) { + return Ok(TxReceipt { + tx_hash, + status: TxStatus::Rejected { reason: record.reason }, + timestamp_ms: Some(record.timestamp_ms), + }); + } + + // 2. Block store: finalized and pending blocks. + if let Some(block_id) = sequencer.block_store().get_block_id_for_tx(tx_hash) { + return Ok(TxReceipt { + tx_hash, + status: TxStatus::Included { block_id }, + timestamp_ms: None, + }); + } + + // Release the sequencer lock before checking the pending set. + drop(sequencer); + + // 3. Pending set: submitted to mempool but not yet in a block. + if self.pending_txs.lock().await.contains(&tx_hash) { + return Ok(TxReceipt { tx_hash, status: TxStatus::Pending, timestamp_ms: None }); + } + + // 4. Unknown: never submitted, invalid hash, or set was evicted. + Ok(TxReceipt { tx_hash, status: TxStatus::Unknown, timestamp_ms: None }) + } + async fn get_accounts_nonces( &self, account_ids: Vec, @@ -265,4 +307,31 @@ mod tests { assert!(result.is_ok()); assert_eq!(result.unwrap(), expected_hash); } + + #[tokio::test] + async fn get_receipt_returns_pending_after_submit() { + use sequencer_service_rpc::RpcServer as _; + let service = make_service().await; + let tx = common::test_utils::produce_dummy_empty_transaction(); + let tx_hash = tx.hash(); + service.send_transaction(tx).await.unwrap(); + let receipt = service.get_transaction_receipt(tx_hash).await.unwrap(); + assert!( + matches!(receipt.status, common::receipt::TxStatus::Pending) + || matches!(receipt.status, common::receipt::TxStatus::Included { .. 
}), + "Expected Pending or Included, got {:?}", + receipt.status + ); + } + + #[tokio::test] + async fn get_receipt_returns_unknown_for_unseen_hash() { + use sequencer_service_rpc::RpcServer as _; + let service = make_service().await; + let receipt = service + .get_transaction_receipt(HashType([0xff; 32])) + .await + .unwrap(); + assert!(matches!(receipt.status, common::receipt::TxStatus::Unknown)); + } } From 81d22aaeece713e5e7cf0b5abcf94e199c69ce8c Mon Sep 17 00:00:00 2001 From: paschal533 Date: Sun, 12 Apr 2026 00:00:57 +0100 Subject: [PATCH 08/14] feat(sequencer-rpc): add getTransactionReceipt to RPC trait and protocol exports --- sequencer/service/protocol/src/lib.rs | 7 ++++++- sequencer/service/rpc/src/lib.rs | 8 +++++++- 2 files changed, 13 insertions(+), 2 deletions(-) diff --git a/sequencer/service/protocol/src/lib.rs b/sequencer/service/protocol/src/lib.rs index ec0020ac9..23b28d7de 100644 --- a/sequencer/service/protocol/src/lib.rs +++ b/sequencer/service/protocol/src/lib.rs @@ -1,5 +1,10 @@ //! Reexports of types used by sequencer rpc specification. 
-pub use common::{HashType, block::Block, transaction::NSSATransaction}; +pub use common::{ + HashType, + block::Block, + receipt::{TxReceipt, TxStatus}, + transaction::NSSATransaction, +}; pub use nssa::{Account, AccountId, ProgramId}; pub use nssa_core::{BlockId, Commitment, MembershipProof, account::Nonce}; diff --git a/sequencer/service/rpc/src/lib.rs b/sequencer/service/rpc/src/lib.rs index 6c03cdb63..0dc8b5a76 100644 --- a/sequencer/service/rpc/src/lib.rs +++ b/sequencer/service/rpc/src/lib.rs @@ -7,7 +7,7 @@ use jsonrpsee::types::ErrorObjectOwned; pub use jsonrpsee::{core::ClientError, http_client::HttpClientBuilder as SequencerClientBuilder}; use sequencer_service_protocol::{ Account, AccountId, Block, BlockId, Commitment, HashType, MembershipProof, NSSATransaction, - Nonce, ProgramId, + Nonce, ProgramId, TxReceipt, }; #[cfg(all(not(feature = "server"), not(feature = "client")))] @@ -70,6 +70,12 @@ pub trait Rpc { tx_hash: HashType, ) -> Result, ErrorObjectOwned>; + #[method(name = "getTransactionReceipt")] + async fn get_transaction_receipt( + &self, + tx_hash: HashType, + ) -> Result; + #[method(name = "getAccountsNonces")] async fn get_accounts_nonces( &self, From 58853c542f5d578358babd414e5806eb7d07a094 Mon Sep 17 00:00:00 2001 From: paschal533 Date: Sun, 12 Apr 2026 00:24:19 +0100 Subject: [PATCH 09/14] fix(sequencer-service): prune pending_txs on terminal receipt and populate block timestamp Restructure get_transaction_receipt to scope the sequencer lock to a block expression, collect a terminal receipt (Rejected/Included) under the lock, release the lock, then lazily remove the hash from pending_txs before returning. 
This fixes two related issues: - pending_txs grew without bound because insert() in send_transaction had no corresponding remove() when TXs were confirmed or rejected - a TX that had been included could still appear as Pending if the caller never queried its receipt Also populate timestamp_ms for Included receipts by fetching the block from the store (an additional DB read already cached by RocksDB's block cache), replacing the previous None placeholder. Lock ordering is preserved: sequencer lock is always released before pending_txs is acquired, matching the order in send_transaction. --- sequencer/service/src/service.rs | 55 ++++++++++++++++++++------------ 1 file changed, 35 insertions(+), 20 deletions(-) diff --git a/sequencer/service/src/service.rs b/sequencer/service/src/service.rs index 27d7d94d6..f220e9598 100644 --- a/sequencer/service/src/service.rs +++ b/sequencer/service/src/service.rs @@ -142,29 +142,44 @@ impl Result { use common::receipt::{TxReceipt, TxStatus}; - let sequencer = self.sequencer.lock().await; - - // 1. Rejected store: durable, survives restarts. - if let Some(record) = sequencer.block_store().get_rejected_tx(tx_hash) { - return Ok(TxReceipt { - tx_hash, - status: TxStatus::Rejected { reason: record.reason }, - timestamp_ms: Some(record.timestamp_ms), - }); - } + // Check durable tiers under the sequencer lock, then release it before + // touching `pending_txs` to preserve lock-ordering invariants. + let terminal_receipt = { + let sequencer = self.sequencer.lock().await; + + // 1. Rejected store: durable, survives restarts. + if let Some(record) = sequencer.block_store().get_rejected_tx(tx_hash) { + Some(TxReceipt { + tx_hash, + status: TxStatus::Rejected { reason: record.reason }, + timestamp_ms: Some(record.timestamp_ms), + }) + // 2. Block store: finalized and pending blocks. 
+ } else if let Some(block_id) = sequencer.block_store().get_block_id_for_tx(tx_hash) { + let timestamp_ms = sequencer + .block_store() + .get_block_at_id(block_id) + .ok() + .flatten() + .map(|b| b.header.timestamp); + Some(TxReceipt { + tx_hash, + status: TxStatus::Included { block_id }, + timestamp_ms, + }) + } else { + None + } + // Sequencer lock released here. + }; - // 2. Block store: finalized and pending blocks. - if let Some(block_id) = sequencer.block_store().get_block_id_for_tx(tx_hash) { - return Ok(TxReceipt { - tx_hash, - status: TxStatus::Included { block_id }, - timestamp_ms: None, - }); + if let Some(receipt) = terminal_receipt { + // Lazy eviction: TX has reached a terminal state; prune it from the + // pending set so `pending_txs` does not grow without bound. + self.pending_txs.lock().await.remove(&tx_hash); + return Ok(receipt); } - // Release the sequencer lock before checking the pending set. - drop(sequencer); - // 3. Pending set: submitted to mempool but not yet in a block. 
if self.pending_txs.lock().await.contains(&tx_hash) { return Ok(TxReceipt { tx_hash, status: TxStatus::Pending, timestamp_ms: None }); From 10093eaf505f2fbc0da62834aec9bdd8cfe0e945 Mon Sep 17 00:00:00 2001 From: paschal533 Date: Sun, 12 Apr 2026 00:02:21 +0100 Subject: [PATCH 10/14] feat(common): add SimulationResult type --- common/src/lib.rs | 1 + common/src/simulation.rs | 31 +++++++++++++++++++++++++++++++ 2 files changed, 32 insertions(+) create mode 100644 common/src/simulation.rs diff --git a/common/src/lib.rs b/common/src/lib.rs index 6c90b36a1..cc1b4eae8 100644 --- a/common/src/lib.rs +++ b/common/src/lib.rs @@ -8,6 +8,7 @@ mod borsh_base64; pub mod config; pub mod transaction; pub mod receipt; +pub mod simulation; // Module for tests utility functions // TODO: Compile only for tests diff --git a/common/src/simulation.rs b/common/src/simulation.rs new file mode 100644 index 000000000..4b5ce407c --- /dev/null +++ b/common/src/simulation.rs @@ -0,0 +1,31 @@ +use nssa::{Account, AccountId}; +use nssa_core::{Commitment, Nullifier}; +use serde::{Deserialize, Serialize}; + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct SimulationResult { + pub success: bool, + pub error: Option, + pub accounts_modified: Vec<(AccountId, Account)>, + pub nullifiers_created: Vec, + pub commitments_created: Vec, +} + +#[cfg(test)] +mod tests { + #[test] + fn simulation_result_serde_roundtrip() { + use super::SimulationResult; + let result = SimulationResult { + success: true, + error: None, + accounts_modified: vec![], + nullifiers_created: vec![], + commitments_created: vec![], + }; + let json = serde_json::to_string(&result).unwrap(); + let back: SimulationResult = serde_json::from_str(&json).unwrap(); + assert!(back.success); + assert!(back.error.is_none()); + } +} From 3b0da2e330c1a3f8dc11f20f054813aeb1aea775 Mon Sep 17 00:00:00 2001 From: paschal533 Date: Sun, 12 Apr 2026 00:03:49 +0100 Subject: [PATCH 11/14] feat(nssa): expose new_nullifiers and new_commitments 
on ValidatedStateDiff --- nssa/src/validated_state_diff.rs | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/nssa/src/validated_state_diff.rs b/nssa/src/validated_state_diff.rs index 9614d1b7e..957c0c9e5 100644 --- a/nssa/src/validated_state_diff.rs +++ b/nssa/src/validated_state_diff.rs @@ -406,6 +406,18 @@ impl ValidatedStateDiff { self.0.public_diff.clone() } + /// Returns the new nullifiers produced by this transaction. + #[must_use] + pub fn new_nullifiers(&self) -> &[nssa_core::Nullifier] { + &self.0.new_nullifiers + } + + /// Returns the new commitments produced by this transaction. + #[must_use] + pub fn new_commitments(&self) -> &[nssa_core::Commitment] { + &self.0.new_commitments + } + pub(crate) fn into_state_diff(self) -> StateDiff { self.0 } From d1807f8564c12ce313530fa3c76949bb424aa916 Mon Sep 17 00:00:00 2001 From: paschal533 Date: Sun, 12 Apr 2026 00:07:47 +0100 Subject: [PATCH 12/14] feat(sequencer-service): add simulate_transaction with clone-and-discard state pattern --- sequencer/service/Cargo.toml | 2 + sequencer/service/src/service.rs | 76 ++++++++++++++++++++++++++++++++ 2 files changed, 78 insertions(+) diff --git a/sequencer/service/Cargo.toml b/sequencer/service/Cargo.toml index bf9b84455..602d57f2e 100644 --- a/sequencer/service/Cargo.toml +++ b/sequencer/service/Cargo.toml @@ -26,12 +26,14 @@ jsonrpsee.workspace = true futures.workspace = true bytesize.workspace = true borsh.workspace = true +chrono.workspace = true [dev-dependencies] sequencer_core = { workspace = true, features = ["mock"] } bedrock_client.workspace = true logos-blockchain-core.workspace = true tempfile.workspace = true +testnet_initial_state.workspace = true [features] default = [] diff --git a/sequencer/service/src/service.rs b/sequencer/service/src/service.rs index f220e9598..3dea0c710 100644 --- a/sequencer/service/src/service.rs +++ b/sequencer/service/src/service.rs @@ -1,5 +1,6 @@ use std::{collections::{BTreeMap, HashSet}, sync::Arc}; +use chrono; 
use common::transaction::{NSSATransaction, TransactionMalformationError}; use jsonrpsee::{ core::async_trait, @@ -189,6 +190,47 @@ impl Result { + use common::simulation::SimulationResult; + + // 1. Stateless check -- no lock required. + let tx = tx + .transaction_stateless_check() + .inspect_err(|err| warn!("simulate_transaction: stateless check failed: {err:#?}")) + .map_err(ErrorObjectOwned::from)?; + + // 2. Clone state under lock, then release immediately. + let (state_clone, block_id, timestamp_ms) = { + let sequencer = self.sequencer.lock().await; + let block_id = sequencer.chain_height() + 1; + let timestamp_ms = u64::try_from(chrono::Utc::now().timestamp_millis()) + .expect("current timestamp must be positive"); + (sequencer.state().clone(), block_id, timestamp_ms) + }; + // Lock is released here. Simulation runs concurrently with block production. + + // 3. Execute on the cloned state -- never committed. + match tx.validate_on_state(&state_clone, block_id, timestamp_ms) { + Ok(diff) => Ok(SimulationResult { + success: true, + error: None, + accounts_modified: diff.public_diff().into_iter().collect(), + nullifiers_created: diff.new_nullifiers().to_vec(), + commitments_created: diff.new_commitments().to_vec(), + }), + Err(err) => Ok(SimulationResult { + success: false, + error: Some(err.to_string()), + accounts_modified: vec![], + nullifiers_created: vec![], + commitments_created: vec![], + }), + } + } + async fn get_accounts_nonces( &self, account_ids: Vec, @@ -349,4 +391,38 @@ mod tests { .unwrap(); assert!(matches!(receipt.status, common::receipt::TxStatus::Unknown)); } + + #[tokio::test] + async fn simulate_valid_transaction_returns_success() { + use sequencer_service_rpc::RpcServer as _; + let service = make_service().await; + let tx = common::test_utils::produce_dummy_empty_transaction(); + let result = service.simulate_transaction(tx).await.unwrap(); + assert!(result.success, "Expected success, got error: {:?}", result.error); + } + + #[tokio::test] 
+ async fn simulate_does_not_modify_state() { + use sequencer_service_rpc::RpcServer as _; + let service = make_service().await; + let tx = common::test_utils::produce_dummy_empty_transaction(); + + // Simulate the transaction. + let sim_result = service.simulate_transaction(tx.clone()).await.unwrap(); + assert!(sim_result.success); + + // The state read via get_account should be unchanged after simulation. + use testnet_initial_state::initial_pub_accounts_private_keys; + let keys = initial_pub_accounts_private_keys(); + let account_id = keys[0].account_id; + let account_before = service.get_account(account_id).await.unwrap(); + + // Simulate again -- state should not change. + let tx2 = common::test_utils::produce_dummy_empty_transaction(); + let sim_result2 = service.simulate_transaction(tx2).await.unwrap(); + assert!(sim_result2.success); + + let account_after = service.get_account(account_id).await.unwrap(); + assert_eq!(account_before.nonce, account_after.nonce, "Simulation must not modify state"); + } } From 222cbae404d7b94701d036408839f5ef5431597a Mon Sep 17 00:00:00 2001 From: paschal533 Date: Sun, 12 Apr 2026 00:09:37 +0100 Subject: [PATCH 13/14] feat(sequencer-rpc): add simulateTransaction to RPC trait and protocol exports --- sequencer/service/protocol/src/lib.rs | 1 + sequencer/service/rpc/src/lib.rs | 8 +++++++- 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/sequencer/service/protocol/src/lib.rs b/sequencer/service/protocol/src/lib.rs index 23b28d7de..7961e8eb1 100644 --- a/sequencer/service/protocol/src/lib.rs +++ b/sequencer/service/protocol/src/lib.rs @@ -4,6 +4,7 @@ pub use common::{ HashType, block::Block, receipt::{TxReceipt, TxStatus}, + simulation::SimulationResult, transaction::NSSATransaction, }; pub use nssa::{Account, AccountId, ProgramId}; diff --git a/sequencer/service/rpc/src/lib.rs b/sequencer/service/rpc/src/lib.rs index 0dc8b5a76..683abf1f6 100644 --- a/sequencer/service/rpc/src/lib.rs +++ 
b/sequencer/service/rpc/src/lib.rs @@ -7,7 +7,7 @@ use jsonrpsee::types::ErrorObjectOwned; pub use jsonrpsee::{core::ClientError, http_client::HttpClientBuilder as SequencerClientBuilder}; use sequencer_service_protocol::{ Account, AccountId, Block, BlockId, Commitment, HashType, MembershipProof, NSSATransaction, - Nonce, ProgramId, TxReceipt, + Nonce, ProgramId, SimulationResult, TxReceipt, }; #[cfg(all(not(feature = "server"), not(feature = "client")))] @@ -76,6 +76,12 @@ pub trait Rpc { tx_hash: HashType, ) -> Result<TxReceipt, ErrorObjectOwned>; + #[method(name = "simulateTransaction")] + async fn simulate_transaction( + &self, + tx: NSSATransaction, + ) -> Result<SimulationResult, ErrorObjectOwned>; + #[method(name = "getAccountsNonces")] async fn get_accounts_nonces( &self, From 347617ece6b2483e03203536aedcf1c15c170e59 Mon Sep 17 00:00:00 2001 From: paschal533 Date: Tue, 14 Apr 2026 12:58:49 +0100 Subject: [PATCH 14/14] feat(sequencer): add local fork mode via --fork CLI flag Implements the local fork mode described in issue #379. When --features standalone is active, passing `--fork <url>` fetches the remote sequencer's execution state snapshot via the new `getStateSnapshot` RPC method and boots the local sequencer from that state.
Changes: - common: add StateSnapshot type (opaque Borsh bytes + block_id) - sequencer/core: add override_initial_state field to SequencerConfig; start_from_config consults it before genesis/DB state - sequencer/service/rpc: add getStateSnapshot to RPC trait - sequencer/service: implement get_state_snapshot, add run_forked(), add --fork CLI arg, expose fork module; add service test for snapshot --- Cargo.lock | 7 +++++ common/src/lib.rs | 1 + common/src/snapshot.rs | 39 +++++++++++++++++++++++++++ sequencer/core/src/config.rs | 6 +++++ sequencer/core/src/lib.rs | 7 +++-- sequencer/service/Cargo.toml | 1 + sequencer/service/protocol/src/lib.rs | 1 + sequencer/service/rpc/src/lib.rs | 8 +++++- sequencer/service/src/fork.rs | 25 +++++++++++++++++ sequencer/service/src/lib.rs | 19 +++++++++++++ sequencer/service/src/main.rs | 25 ++++++++++++++--- sequencer/service/src/service.rs | 29 ++++++++++++++++++++ 12 files changed, 162 insertions(+), 6 deletions(-) create mode 100644 common/src/snapshot.rs create mode 100644 sequencer/service/src/fork.rs diff --git a/Cargo.lock b/Cargo.lock index 9e1d157c4..9e6f7b3b5 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1521,11 +1521,13 @@ dependencies = [ "borsh", "clock_core", "hex", + "jsonrpsee", "log", "logos-blockchain-common-http-client", "nssa", "nssa_core", "serde", + "serde_json", "serde_with", "sha2", "thiserror 2.0.18", @@ -7173,8 +7175,10 @@ name = "sequencer_service" version = "0.1.0" dependencies = [ "anyhow", + "bedrock_client", "borsh", "bytesize", + "chrono", "clap", "common", "env_logger", @@ -7182,11 +7186,14 @@ dependencies = [ "indexer_service_rpc", "jsonrpsee", "log", + "logos-blockchain-core", "mempool", "nssa", "sequencer_core", "sequencer_service_protocol", "sequencer_service_rpc", + "tempfile", + "testnet_initial_state", "tokio", "tokio-util", ] diff --git a/common/src/lib.rs b/common/src/lib.rs index cc1b4eae8..dfe1242a2 100644 --- a/common/src/lib.rs +++ b/common/src/lib.rs @@ -9,6 +9,7 @@ pub mod config; pub 
mod transaction; pub mod receipt; pub mod simulation; +pub mod snapshot; // Module for tests utility functions // TODO: Compile only for tests diff --git a/common/src/snapshot.rs b/common/src/snapshot.rs new file mode 100644 index 000000000..1aaea7ba8 --- /dev/null +++ b/common/src/snapshot.rs @@ -0,0 +1,39 @@ +use borsh::{BorshDeserialize, BorshSerialize}; +use nssa_core::BlockId; +use serde::{Deserialize, Serialize}; + +/// A point-in-time snapshot of the sequencer's execution state, returned by the +/// `get_state_snapshot` RPC method and consumed by fork-mode startup. +/// +/// `state_bytes` is an opaque Borsh-serialized `V03State`; callers that need to +/// deserialize it must depend on `nssa` directly. +#[derive(Debug, Clone, Serialize, Deserialize, BorshSerialize, BorshDeserialize)] +pub struct StateSnapshot { + /// Borsh-serialized `V03State` — opaque to keep `common` independent of `nssa`. + pub state_bytes: Vec<u8>, + /// Chain height at the moment the snapshot was taken. + pub block_id: BlockId, +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn borsh_roundtrip() { + let snapshot = StateSnapshot { state_bytes: vec![1, 2, 3, 4], block_id: 42 }; + let encoded = borsh::to_vec(&snapshot).unwrap(); + let decoded: StateSnapshot = borsh::from_slice(&encoded).unwrap(); + assert_eq!(decoded.block_id, 42); + assert_eq!(decoded.state_bytes, [1, 2, 3, 4]); + } + + #[test] + fn serde_roundtrip() { + let snapshot = StateSnapshot { state_bytes: vec![0xff, 0x00], block_id: 7 }; + let json = serde_json::to_string(&snapshot).unwrap(); + let decoded: StateSnapshot = serde_json::from_str(&json).unwrap(); + assert_eq!(decoded.block_id, 7); + assert_eq!(decoded.state_bytes, [0xff, 0x00]); + } +} diff --git a/sequencer/core/src/config.rs b/sequencer/core/src/config.rs index fa4a2fa7e..3fb9b7cbe 100644 --- a/sequencer/core/src/config.rs +++ b/sequencer/core/src/config.rs @@ -11,6 +11,7 @@ use bytesize::ByteSize; use common::config::BasicAuth; use humantime_serde; use
logos_blockchain_core::mantle::ops::channel::ChannelId; +use nssa::V03State; use serde::{Deserialize, Serialize}; use testnet_initial_state::{PrivateAccountPublicInitialData, PublicAccountPublicInitialData}; use url::Url; @@ -48,6 +49,11 @@ pub struct SequencerConfig { pub initial_public_accounts: Option<Vec<PublicAccountPublicInitialData>>, #[serde(skip_serializing_if = "Option::is_none")] pub initial_private_accounts: Option<Vec<PrivateAccountPublicInitialData>>, + /// Injected programmatically for fork mode; never read from or written to config files. + /// When `Some`, this state is used directly as the initial state, bypassing genesis and + /// `initial_public_accounts`/`initial_private_accounts`. + #[serde(skip)] + pub override_initial_state: Option<Box<V03State>>, } #[derive(Clone, Serialize, Deserialize)] diff --git a/sequencer/core/src/lib.rs b/sequencer/core/src/lib.rs index 263b6faf8..2ec822805 100644 --- a/sequencer/core/src/lib.rs +++ b/sequencer/core/src/lib.rs @@ -53,7 +53,7 @@ impl SequencerCore (Self, MemPoolHandle) { let hashable_data = HashableBlockData { block_id: config.genesis_id, @@ -95,7 +95,10 @@ impl SequencerCore Result, ErrorObjectOwned>; + /// Returns a snapshot of the current execution state as opaque Borsh bytes. + /// Intended for fork mode: a local sequencer can bootstrap from this snapshot + /// instead of replaying blocks from genesis. + #[method(name = "getStateSnapshot")] + async fn get_state_snapshot(&self) -> Result<StateSnapshot, ErrorObjectOwned>; + // ============================================================================================= } diff --git a/sequencer/service/src/fork.rs b/sequencer/service/src/fork.rs new file mode 100644 index 000000000..930e8263e --- /dev/null +++ b/sequencer/service/src/fork.rs @@ -0,0 +1,25 @@ +use anyhow::{Context as _, Result}; +use nssa::V03State; +use nssa_core::BlockId; +use sequencer_service_rpc::{RpcClient as _, SequencerClientBuilder}; +use url::Url; + +/// Connects to a running sequencer at `url`, fetches a state snapshot, and deserializes it. +/// +/// Returns `(forked_state, fork_block_id)`.
The caller passes these to `run_forked` so the +/// local sequencer starts from the remote chain's current height and account state. +pub async fn fetch_fork_state(url: &Url) -> Result<(V03State, BlockId)> { + let client = SequencerClientBuilder::default() + .build(url.as_str()) + .with_context(|| format!("Failed to connect to remote sequencer at {url}"))?; + + let snapshot = client + .get_state_snapshot() + .await + .with_context(|| format!("get_state_snapshot RPC failed against {url}"))?; + + let state = borsh::from_slice::<V03State>(&snapshot.state_bytes) + .context("Failed to deserialize forked V03State from snapshot bytes")?; + + Ok((state, snapshot.block_id)) +} diff --git a/sequencer/service/src/lib.rs b/sequencer/service/src/lib.rs index 5373b31fb..db90a179a 100644 --- a/sequencer/service/src/lib.rs +++ b/sequencer/service/src/lib.rs @@ -18,6 +18,8 @@ use sequencer_service_rpc::RpcServer as _; use tokio::{sync::Mutex, task::JoinHandle}; pub mod service; +#[cfg(feature = "standalone")] +pub mod fork; const REQUEST_BODY_MAX_SIZE: ByteSize = ByteSize::mib(10); @@ -189,6 +191,23 @@ pub async fn run(config: SequencerConfig, port: u16) -> Result )) } +/// Start the sequencer from a remote chain's state snapshot (fork mode). +/// +/// Injects the provided `forked_state` as the initial execution state and sets +/// the genesis block ID to `fork_block_id`, so new blocks are produced starting +/// at `fork_block_id + 1`. Only available with the `standalone` feature.
+#[cfg(feature = "standalone")] +pub async fn run_forked( + mut config: SequencerConfig, + port: u16, + forked_state: nssa::V03State, + fork_block_id: nssa_core::BlockId, +) -> Result { + config.genesis_id = fork_block_id; + config.override_initial_state = Some(Box::new(forked_state)); + run(config, port).await +} + async fn run_server( sequencer: Arc>, mempool_handle: MemPoolHandle, diff --git a/sequencer/service/src/main.rs b/sequencer/service/src/main.rs index e78ad502e..b33d89bdd 100644 --- a/sequencer/service/src/main.rs +++ b/sequencer/service/src/main.rs @@ -12,6 +12,12 @@ struct Args { config_path: PathBuf, #[clap(short, long, default_value = "3040")] port: u16, + /// Fork initial state from a running sequencer at this URL (requires `--features standalone`). + /// The local sequencer will start producing blocks from the remote chain's current head. + /// Example: `--fork http://localhost:3040` + #[cfg(feature = "standalone")] + #[clap(long)] + fork: Option, } #[tokio::main] @@ -22,12 +28,25 @@ struct Args { async fn main() -> Result<()> { env_logger::init(); - let Args { config_path, port } = Args::parse(); + let args = Args::parse(); let cancellation_token = listen_for_shutdown_signal(); - let config = sequencer_service::SequencerConfig::from_path(&config_path)?; - let sequencer_handle = sequencer_service::run(config, port).await?; + let config = sequencer_service::SequencerConfig::from_path(&args.config_path)?; + + #[cfg(feature = "standalone")] + let sequencer_handle = if let Some(ref fork_url) = args.fork { + info!("Fork mode: fetching state snapshot from {fork_url}"); + let (forked_state, fork_block_id) = + sequencer_service::fork::fetch_fork_state(fork_url).await?; + info!("Forked state fetched at block {fork_block_id}, starting local sequencer"); + sequencer_service::run_forked(config, args.port, forked_state, fork_block_id).await? + } else { + sequencer_service::run(config, args.port).await? 
+ }; + + #[cfg(not(feature = "standalone"))] + let sequencer_handle = sequencer_service::run(config, args.port).await?; tokio::select! { () = cancellation_token.cancelled() => { diff --git a/sequencer/service/src/service.rs b/sequencer/service/src/service.rs index 3dea0c710..ccb39d720 100644 --- a/sequencer/service/src/service.rs +++ b/sequencer/service/src/service.rs @@ -256,6 +256,14 @@ impl Result { + let sequencer = self.sequencer.lock().await; + let state_bytes = borsh::to_vec(sequencer.state()).map_err(|e| { + ErrorObjectOwned::owned(ErrorCode::InternalError.code(), e.to_string(), None::<()>) + })?; + Ok(common::snapshot::StateSnapshot { state_bytes, block_id: sequencer.chain_height() }) + } + async fn get_program_ids(&self) -> Result, ErrorObjectOwned> { let mut program_ids = BTreeMap::new(); program_ids.insert( @@ -319,6 +327,7 @@ mod tests { indexer_rpc_url: "ws://localhost:8779".parse().unwrap(), initial_public_accounts: None, initial_private_accounts: None, + override_initial_state: None, } } @@ -425,4 +434,24 @@ mod tests { let account_after = service.get_account(account_id).await.unwrap(); assert_eq!(account_before.nonce, account_after.nonce, "Simulation must not modify state"); } + + #[tokio::test] + async fn get_state_snapshot_returns_deserializable_state() { + use nssa::V03State; + use sequencer_service_rpc::RpcServer as _; + + let service = make_service().await; + let snapshot = service.get_state_snapshot().await.unwrap(); + + // The returned bytes must round-trip through Borsh. + assert!( + !snapshot.state_bytes.is_empty(), + "Snapshot state_bytes must not be empty" + ); + let decoded = borsh::from_slice::<V03State>(&snapshot.state_bytes); + assert!(decoded.is_ok(), "Snapshot bytes must deserialize to V03State"); + + // block_id reflects the sequencer's chain height at snapshot time. + assert_eq!(snapshot.block_id, 1, "Fresh sequencer genesis block id should be 1"); + } }