From c314bc2107db2459d40bb52e535b9a162fb91629 Mon Sep 17 00:00:00 2001 From: Niven Date: Mon, 9 Mar 2026 15:49:12 +0800 Subject: [PATCH 01/76] chore(flashblocks-rpc): clean up and refactor dirs MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude Sonnet 4.6 --- Cargo.lock | 3 - bin/node/src/main.rs | 3 +- crates/flashblocks/Cargo.toml | 3 - crates/flashblocks/src/cache.rs | 16 +- crates/flashblocks/src/consensus.rs | 458 ------------------ .../src/{tx_cache.rs => execution/cache.rs} | 56 +-- crates/flashblocks/src/execution/mod.rs | 4 + .../flashblocks/src/{ => execution}/worker.rs | 191 ++++---- .../flashblocks/src/{handler.rs => handle.rs} | 0 crates/flashblocks/src/lib.rs | 34 +- crates/flashblocks/src/service.rs | 38 +- crates/flashblocks/src/subscription/mod.rs | 4 + .../src/{ => subscription}/pubsub.rs | 8 +- .../{subscription.rs => subscription/rpc.rs} | 17 +- crates/flashblocks/src/types/mod.rs | 9 + crates/flashblocks/src/{ => types}/payload.rs | 0 .../src/{ => types}/pending_state.rs | 0 .../flashblocks/src/{ => types}/sequence.rs | 0 18 files changed, 185 insertions(+), 659 deletions(-) delete mode 100644 crates/flashblocks/src/consensus.rs rename crates/flashblocks/src/{tx_cache.rs => execution/cache.rs} (92%) create mode 100644 crates/flashblocks/src/execution/mod.rs rename crates/flashblocks/src/{ => execution}/worker.rs (86%) rename crates/flashblocks/src/{handler.rs => handle.rs} (100%) create mode 100644 crates/flashblocks/src/subscription/mod.rs rename crates/flashblocks/src/{ => subscription}/pubsub.rs (99%) rename crates/flashblocks/src/{subscription.rs => subscription/rpc.rs} (99%) create mode 100644 crates/flashblocks/src/types/mod.rs rename crates/flashblocks/src/{ => types}/payload.rs (100%) rename crates/flashblocks/src/{ => types}/pending_state.rs (100%) rename crates/flashblocks/src/{ => types}/sequence.rs (100%) diff --git a/Cargo.lock b/Cargo.lock index b0271637..f571932b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -14263,7 +14263,6 @@ dependencies = [ "op-alloy-rpc-types-engine", "op-revm", "reth-chain-state", - "reth-engine-primitives", "reth-errors", "reth-evm", "reth-execution-types", @@ -14273,9 +14272,7 @@ dependencies = [ "reth-optimism-chainspec", "reth-optimism-evm", "reth-optimism-flashblocks", - "reth-optimism-payload-builder", "reth-optimism-primitives", - "reth-payload-primitives", "reth-primitives-traits", "reth-provider", "reth-revm", diff --git a/bin/node/src/main.rs b/bin/node/src/main.rs index 280a9e80..41cb4ad7 100644 --- a/bin/node/src/main.rs +++ b/bin/node/src/main.rs @@ -23,8 +23,7 @@ use reth_optimism_node::{args::RollupArgs, OpNode}; use reth_rpc_server_types::RethRpcModule; use xlayer_chainspec::XLayerChainSpecParser; -use xlayer_flashblocks::handler::FlashblocksService; -use xlayer_flashblocks::subscription::FlashblocksPubSub; +use xlayer_flashblocks::{handle::FlashblocksService, subscription::FlashblocksPubSub}; use xlayer_legacy_rpc::{layer::LegacyRpcRouterLayer, LegacyRpcRouterConfig}; use xlayer_monitor::{start_monitor_handle, RpcMonitorLayer, XLayerMonitor}; use xlayer_rpc::xlayer_ext::{XlayerRpcExt, XlayerRpcExtApiServer}; diff --git a/crates/flashblocks/Cargo.toml b/crates/flashblocks/Cargo.toml index 0b326de5..7bbecfd0 100644 --- a/crates/flashblocks/Cargo.toml +++ b/crates/flashblocks/Cargo.toml @@ -15,16 +15,13 @@ xlayer-builder.workspace = true # reth reth-chain-state = { workspace = true, features = ["serde"] } -reth-engine-primitives = { workspace = true, features = ["std"] } reth-errors.workspace = true reth-evm.workspace = true reth-execution-types = { workspace = true, features = ["serde"] } reth-metrics.workspace = true reth-node-api.workspace = true reth-node-core.workspace = true -reth-optimism-payload-builder.workspace = true reth-optimism-primitives = { workspace = true, features = ["serde"] } -reth-payload-primitives.workspace = true reth-primitives-traits = { workspace = true, features = ["serde"] } reth-revm.workspace = true reth-rpc.workspace = true diff --git a/crates/flashblocks/src/cache.rs b/crates/flashblocks/src/cache.rs index 93b5ad90..b8129d92 100644 --- a/crates/flashblocks/src/cache.rs +++ b/crates/flashblocks/src/cache.rs @@ -4,13 +4,13 @@ //! and intelligently selects which sequence to build based on the local chain tip. use crate::{ - pending_state::PendingBlockState, - sequence::{FlashBlockPendingSequence, SequenceExecutionOutcome}, + types::pending_state::PendingBlockState, + types::sequence::{FlashBlockPendingSequence, SequenceExecutionOutcome}, validation::{ CanonicalBlockFingerprint, CanonicalBlockReconciler, ReconciliationStrategy, ReorgDetector, TrackedBlockFingerprint, }, - worker::BuildArgs, + execution::worker::BuildArgs, FlashBlock, FlashBlockCompleteSequence, PendingFlashBlock, }; use alloy_eips::eip2718::WithEncoded; @@ -1046,7 +1046,7 @@ mod tests { #[test] fn test_next_buildable_args_skips_executed_cached_and_advances_speculative() { - use crate::pending_state::PendingBlockState; + use crate::types::pending_state::PendingBlockState; use reth_execution_types::BlockExecutionOutput; use reth_revm::cached::CachedReads; use std::sync::Arc; @@ -1151,7 +1151,7 @@ mod tests { #[test] fn test_delayed_canonical_allows_speculative_next_block_index_zero() { - use crate::pending_state::PendingBlockState; + use crate::types::pending_state::PendingBlockState; use reth_execution_types::BlockExecutionOutput; use reth_revm::cached::CachedReads; use std::sync::Arc; @@ -1726,7 +1726,7 @@ mod tests { #[test] fn test_speculative_build_with_pending_parent_state() { - use crate::pending_state::PendingBlockState; + use crate::types::pending_state::PendingBlockState; use reth_execution_types::BlockExecutionOutput; use reth_revm::cached::CachedReads; use std::sync::Arc; @@ -1769,7 +1769,7 @@ mod tests { #[test] fn test_speculative_build_uses_cached_sequence() { - use crate::pending_state::PendingBlockState; + use crate::types::pending_state::PendingBlockState; use reth_execution_types::BlockExecutionOutput; use reth_revm::cached::CachedReads; use std::sync::Arc; @@ -1815,7 +1815,7 @@ mod tests { #[test] fn test_canonical_build_takes_priority_over_speculative() { - use crate::pending_state::PendingBlockState; + use crate::types::pending_state::PendingBlockState; use reth_execution_types::BlockExecutionOutput; use reth_revm::cached::CachedReads; use std::sync::Arc; diff --git a/crates/flashblocks/src/consensus.rs b/crates/flashblocks/src/consensus.rs deleted file mode 100644 index 453d9bff..00000000 --- a/crates/flashblocks/src/consensus.rs +++ /dev/null @@ -1,458 +0,0 @@ -use crate::{FlashBlockCompleteSequence, FlashBlockCompleteSequenceRx}; -use alloy_primitives::B256; -use alloy_rpc_types_engine::PayloadStatusEnum; -use op_alloy_rpc_types_engine::OpExecutionData; -use reth_engine_primitives::ConsensusEngineHandle; -use reth_optimism_payload_builder::OpPayloadTypes; -use reth_payload_primitives::{EngineApiMessageVersion, ExecutionPayload, PayloadTypes}; -use tracing::*; - -/// Consensus client that sends FCUs and new payloads using blocks from a [`FlashBlockService`]. -/// -/// This client receives completed flashblock sequences and: -/// - Attempts to submit `engine_newPayload` if `state_root` is available (non-zero) -/// - Always sends `engine_forkChoiceUpdated` to drive chain forward -/// -/// [`FlashBlockService`]: crate::FlashBlockService -#[derive(Debug)] -pub struct FlashBlockConsensusClient

-where - P: PayloadTypes, -{ - /// Handle to execution client. - engine_handle: ConsensusEngineHandle

, - /// Receiver for completed flashblock sequences from `FlashBlockService`. - sequence_receiver: FlashBlockCompleteSequenceRx, -} - -impl

FlashBlockConsensusClient

-where - P: PayloadTypes, - P::ExecutionData: for<'a> TryFrom<&'a FlashBlockCompleteSequence, Error: std::fmt::Display>, -{ - /// Create a new `FlashBlockConsensusClient` with the given Op engine and sequence receiver. - pub const fn new( - engine_handle: ConsensusEngineHandle

, - sequence_receiver: FlashBlockCompleteSequenceRx, - ) -> eyre::Result { - Ok(Self { engine_handle, sequence_receiver }) - } - - /// Attempts to submit a new payload to the engine. - /// - /// The `TryFrom` conversion will fail if `execution_outcome.state_root` is `B256::ZERO`, - /// in which case this method uses the `parent_hash` instead to drive the chain forward. - /// - /// Returns the block hash to use for FCU (either the new block's hash or the parent hash). - async fn submit_new_payload(&self, sequence: &FlashBlockCompleteSequence) -> B256 { - let payload = match P::ExecutionData::try_from(sequence) { - Ok(payload) => payload, - Err(err) => { - trace!(target: "flashblocks", %err, "Failed payload conversion, using parent hash"); - return sequence.payload_base().parent_hash; - } - }; - - let block_number = payload.block_number(); - let block_hash = payload.block_hash(); - match self.engine_handle.new_payload(payload).await { - Ok(result) => { - debug!( - target: "flashblocks", - flashblock_count = sequence.count(), - block_number, - %block_hash, - ?result, - "Submitted engine_newPayload", - ); - - if let PayloadStatusEnum::Invalid { validation_error } = result.status { - debug!( - target: "flashblocks", - block_number, - %block_hash, - %validation_error, - "Payload validation error", - ); - }; - } - Err(err) => { - error!( - target: "flashblocks", - %err, - block_number, - "Failed to submit new payload", - ); - } - } - - block_hash - } - - /// Submit a forkchoice update to the engine. - async fn submit_forkchoice_update( - &self, - head_block_hash: B256, - sequence: &FlashBlockCompleteSequence, - ) { - let block_number = sequence.block_number(); - let safe_hash = sequence.payload_base().parent_hash; - let finalized_hash = sequence.payload_base().parent_hash; - let fcu_state = alloy_rpc_types_engine::ForkchoiceState { - head_block_hash, - safe_block_hash: safe_hash, - finalized_block_hash: finalized_hash, - }; - - match self - .engine_handle - .fork_choice_updated(fcu_state, None, EngineApiMessageVersion::V5) - .await - { - Ok(result) => { - debug!( - target: "flashblocks", - flashblock_count = sequence.count(), - block_number, - %head_block_hash, - %safe_hash, - %finalized_hash, - ?result, - "Submitted engine_forkChoiceUpdated", - ) - } - Err(err) => { - error!( - target: "flashblocks", - %err, - block_number, - %head_block_hash, - %safe_hash, - %finalized_hash, - "Failed to submit fork choice update", - ); - } - } - } - - /// Runs the consensus client loop. - /// - /// Continuously receives completed flashblock sequences and submits them to the execution - /// engine: - /// 1. Attempts `engine_newPayload` (only if `state_root` is available) - /// 2. Always sends `engine_forkChoiceUpdated` to drive chain forward - pub async fn run(mut self) { - loop { - let Ok(sequence) = self.sequence_receiver.recv().await else { - continue; - }; - - // Returns block_hash for FCU: - // - If state_root is available: submits newPayload and returns the new block's hash - // - If state_root is zero: skips newPayload and returns parent_hash (no progress yet) - let block_hash = self.submit_new_payload(&sequence).await; - - self.submit_forkchoice_update(block_hash, &sequence).await; - } - } -} - -impl TryFrom<&FlashBlockCompleteSequence> for OpExecutionData { - type Error = &'static str; - - fn try_from(sequence: &FlashBlockCompleteSequence) -> Result { - let mut data = Self::from_flashblocks_unchecked(sequence); - - // If execution outcome is available, use the computed state_root and block_hash. - // FlashBlockService computes these when building sequences on top of the local tip. - if let Some(execution_outcome) = sequence.execution_outcome() { - let payload = data.payload.as_v1_mut(); - payload.state_root = execution_outcome.state_root; - payload.block_hash = execution_outcome.block_hash; - } - - // Only proceed if we have a valid state_root (non-zero). - if data.payload.as_v1_mut().state_root == B256::ZERO { - return Err("No state_root available for payload"); - } - - Ok(data) - } -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::{sequence::SequenceExecutionOutcome, test_utils::TestFlashBlockFactory}; - - mod op_execution_data_conversion { - use super::*; - - #[test] - fn test_try_from_fails_with_zero_state_root() { - // When execution_outcome is None, state_root remains zero and conversion fails - let factory = TestFlashBlockFactory::new(); - let fb0 = factory.flashblock_at(0).state_root(B256::ZERO).build(); - - let sequence = FlashBlockCompleteSequence::new(vec![fb0], None).unwrap(); - - let result = OpExecutionData::try_from(&sequence); - assert!(result.is_err()); - assert_eq!(result.unwrap_err(), "No state_root available for payload"); - } - - #[test] - fn test_try_from_succeeds_with_execution_outcome() { - // When execution_outcome has state_root, conversion succeeds - let factory = TestFlashBlockFactory::new(); - let fb0 = factory.flashblock_at(0).state_root(B256::ZERO).build(); - - let execution_outcome = SequenceExecutionOutcome { - block_hash: B256::random(), - state_root: B256::random(), // Non-zero - }; - - let sequence = - FlashBlockCompleteSequence::new(vec![fb0], Some(execution_outcome)).unwrap(); - - let result = OpExecutionData::try_from(&sequence); - assert!(result.is_ok()); - - let mut data = result.unwrap(); - assert_eq!(data.payload.as_v1_mut().state_root, execution_outcome.state_root); - assert_eq!(data.payload.as_v1_mut().block_hash, execution_outcome.block_hash); - } - - #[test] - fn test_try_from_succeeds_with_provided_state_root() { - // When sequencer provides non-zero state_root, conversion succeeds - let factory = TestFlashBlockFactory::new(); - let provided_state_root = B256::random(); - let fb0 = factory.flashblock_at(0).state_root(provided_state_root).build(); - - let sequence = FlashBlockCompleteSequence::new(vec![fb0], None).unwrap(); - - let result = OpExecutionData::try_from(&sequence); - assert!(result.is_ok()); - - let mut data = result.unwrap(); - assert_eq!(data.payload.as_v1_mut().state_root, provided_state_root); - } - - #[test] - fn test_try_from_execution_outcome_overrides_provided_state_root() { - // execution_outcome takes precedence over sequencer-provided state_root - let factory = TestFlashBlockFactory::new(); - let provided_state_root = B256::random(); - let fb0 = factory.flashblock_at(0).state_root(provided_state_root).build(); - - let execution_outcome = SequenceExecutionOutcome { - block_hash: B256::random(), - state_root: B256::random(), // Different from provided - }; - - let sequence = - FlashBlockCompleteSequence::new(vec![fb0], Some(execution_outcome)).unwrap(); - - let result = OpExecutionData::try_from(&sequence); - assert!(result.is_ok()); - - let mut data = result.unwrap(); - // Should use execution_outcome, not the provided state_root - assert_eq!(data.payload.as_v1_mut().state_root, execution_outcome.state_root); - assert_ne!(data.payload.as_v1_mut().state_root, provided_state_root); - } - - #[test] - fn test_try_from_with_multiple_flashblocks() { - // Test conversion with sequence of multiple flashblocks - let factory = TestFlashBlockFactory::new(); - let fb0 = factory.flashblock_at(0).state_root(B256::ZERO).build(); - let fb1 = factory.flashblock_after(&fb0).state_root(B256::ZERO).build(); - let fb2 = factory.flashblock_after(&fb1).state_root(B256::ZERO).build(); - - let execution_outcome = - SequenceExecutionOutcome { block_hash: B256::random(), state_root: B256::random() }; - - let sequence = - FlashBlockCompleteSequence::new(vec![fb0, fb1, fb2], Some(execution_outcome)) - .unwrap(); - - let result = OpExecutionData::try_from(&sequence); - assert!(result.is_ok()); - - let mut data = result.unwrap(); - assert_eq!(data.payload.as_v1_mut().state_root, execution_outcome.state_root); - assert_eq!(data.payload.as_v1_mut().block_hash, execution_outcome.block_hash); - } - } - - mod consensus_client_creation { - use super::*; - use tokio::sync::broadcast; - - #[test] - fn test_new_creates_client() { - let (engine_tx, _) = tokio::sync::mpsc::unbounded_channel(); - let engine_handle = ConsensusEngineHandle::::new(engine_tx); - - let (_, sequence_rx) = broadcast::channel(1); - - let result = FlashBlockConsensusClient::new(engine_handle, sequence_rx); - assert!(result.is_ok()); - } - } - - mod submit_new_payload_behavior { - use super::*; - - #[test] - fn test_submit_new_payload_returns_parent_hash_when_no_state_root() { - // When conversion fails (no state_root), should return parent_hash - let factory = TestFlashBlockFactory::new(); - let fb0 = factory.flashblock_at(0).state_root(B256::ZERO).build(); - let parent_hash = fb0.base.as_ref().unwrap().parent_hash; - - let sequence = FlashBlockCompleteSequence::new(vec![fb0], None).unwrap(); - - // Verify conversion would fail - let conversion_result = OpExecutionData::try_from(&sequence); - assert!(conversion_result.is_err()); - - // In the actual run loop, submit_new_payload would return parent_hash - assert_eq!(sequence.payload_base().parent_hash, parent_hash); - } - - #[test] - fn test_submit_new_payload_returns_block_hash_when_state_root_available() { - // When conversion succeeds, should return the new block's hash - let factory = TestFlashBlockFactory::new(); - let fb0 = factory.flashblock_at(0).state_root(B256::ZERO).build(); - - let execution_outcome = - SequenceExecutionOutcome { block_hash: B256::random(), state_root: B256::random() }; - - let sequence = - FlashBlockCompleteSequence::new(vec![fb0], Some(execution_outcome)).unwrap(); - - // Verify conversion succeeds - let conversion_result = OpExecutionData::try_from(&sequence); - assert!(conversion_result.is_ok()); - - let mut data = conversion_result.unwrap(); - assert_eq!(data.payload.as_v1_mut().block_hash, execution_outcome.block_hash); - } - } - - mod forkchoice_update_behavior { - use super::*; - - #[test] - fn test_forkchoice_state_uses_parent_hash_for_safe_and_finalized() { - // Both safe_hash and finalized_hash should be set to parent_hash - let factory = TestFlashBlockFactory::new(); - let fb0 = factory.flashblock_at(0).build(); - let parent_hash = fb0.base.as_ref().unwrap().parent_hash; - - let sequence = FlashBlockCompleteSequence::new(vec![fb0], None).unwrap(); - - // Verify the expected forkchoice state - assert_eq!(sequence.payload_base().parent_hash, parent_hash); - } - - #[test] - fn test_forkchoice_update_with_new_block_hash() { - // When newPayload succeeds, FCU should use the new block's hash as head - let factory = TestFlashBlockFactory::new(); - let fb0 = factory.flashblock_at(0).state_root(B256::ZERO).build(); - - let execution_outcome = - SequenceExecutionOutcome { block_hash: B256::random(), state_root: B256::random() }; - - let sequence = - FlashBlockCompleteSequence::new(vec![fb0], Some(execution_outcome)).unwrap(); - - // The head_block_hash for FCU would be execution_outcome.block_hash - assert_eq!( - sequence.execution_outcome().unwrap().block_hash, - execution_outcome.block_hash - ); - } - - #[test] - fn test_forkchoice_update_with_parent_hash_when_no_state_root() { - // When newPayload is skipped (no state_root), FCU should use parent_hash as head - let factory = TestFlashBlockFactory::new(); - let fb0 = factory.flashblock_at(0).state_root(B256::ZERO).build(); - let parent_hash = fb0.base.as_ref().unwrap().parent_hash; - - let sequence = FlashBlockCompleteSequence::new(vec![fb0], None).unwrap(); - - // The head_block_hash for FCU would be parent_hash (fallback) - assert_eq!(sequence.payload_base().parent_hash, parent_hash); - } - } - - mod run_loop_logic { - use super::*; - - #[test] - fn test_run_loop_processes_sequence_with_state_root() { - // Scenario: Sequence with state_root should trigger both newPayload and FCU - let factory = TestFlashBlockFactory::new(); - let fb0 = factory.flashblock_at(0).state_root(B256::ZERO).build(); - - let execution_outcome = - SequenceExecutionOutcome { block_hash: B256::random(), state_root: B256::random() }; - - let sequence = - FlashBlockCompleteSequence::new(vec![fb0], Some(execution_outcome)).unwrap(); - - // Verify sequence is ready for newPayload - let conversion = OpExecutionData::try_from(&sequence); - assert!(conversion.is_ok()); - } - - #[test] - fn test_run_loop_processes_sequence_without_state_root() { - // Scenario: Sequence without state_root should skip newPayload but still do FCU - let factory = TestFlashBlockFactory::new(); - let fb0 = factory.flashblock_at(0).state_root(B256::ZERO).build(); - - let sequence = FlashBlockCompleteSequence::new(vec![fb0], None).unwrap(); - - // Verify sequence cannot be converted (newPayload will be skipped) - let conversion = OpExecutionData::try_from(&sequence); - assert!(conversion.is_err()); - - // But FCU should still happen with parent_hash - assert!(sequence.payload_base().parent_hash != B256::ZERO); - } - - #[test] - fn test_run_loop_handles_multiple_sequences() { - // Multiple sequences should be processed independently - let factory = TestFlashBlockFactory::new(); - - // Sequence 1: With state_root - let fb0_seq1 = factory.flashblock_at(0).state_root(B256::ZERO).build(); - let outcome1 = - SequenceExecutionOutcome { block_hash: B256::random(), state_root: B256::random() }; - let seq1 = - FlashBlockCompleteSequence::new(vec![fb0_seq1.clone()], Some(outcome1)).unwrap(); - - // Sequence 2: Without state_root (for next block) - let fb0_seq2 = factory.flashblock_for_next_block(&fb0_seq1).build(); - let seq2 = FlashBlockCompleteSequence::new(vec![fb0_seq2], None).unwrap(); - - // Both should be valid sequences - assert_eq!(seq1.block_number(), 100); - assert_eq!(seq2.block_number(), 101); - - // seq1 can be converted - assert!(OpExecutionData::try_from(&seq1).is_ok()); - // seq2 cannot be converted - assert!(OpExecutionData::try_from(&seq2).is_err()); - } - } -} diff --git a/crates/flashblocks/src/tx_cache.rs b/crates/flashblocks/src/execution/cache.rs similarity index 92% rename from crates/flashblocks/src/tx_cache.rs rename to crates/flashblocks/src/execution/cache.rs index f03d5e0c..93b744f5 100644 --- a/crates/flashblocks/src/tx_cache.rs +++ b/crates/flashblocks/src/execution/cache.rs @@ -1,8 +1,10 @@ -//! Transaction execution caching for flashblock building. +//! Execution caching for flashblock building. //! //! When flashblocks arrive incrementally, each new flashblock triggers a rebuild of pending -//! state from all transactions in the sequence. Without caching, this means re-reading -//! state from disk for accounts/storage that were already loaded in previous builds. +//! state from all transactions in the sequence. To ensure that the incoming flashblocks +//! are incrementally re-built, from their sequence, the execution cache stores the cumulative +//! bundle state from previous executions. This ensures that states are not re-read from disk +//! for accounts/storage that were already loaded in previous builds. //! //! # Approach //! @@ -11,10 +13,6 @@ //! cached bundle can be used as a **prestate** for the State builder. This avoids redundant //! disk reads for accounts/storage that were already modified. //! -//! **Important**: Prefix transaction skipping is only safe when the incoming transaction list -//! fully extends the cached list. In that case, callers can execute only the uncached suffix -//! and stitch in the cached prefix receipts/metadata. -//! //! The cache stores: //! - Ordered list of executed transaction hashes (for prefix matching) //! - Cumulative bundle state after all cached transactions (used as prestate) @@ -56,12 +54,8 @@ pub(crate) struct CachedExecutionMeta { pub blob_gas_used: u64, } -/// Resumable cached state: bundle + receipts + cached prefix length. -pub(crate) type ResumableState<'a, N> = - (&'a BundleState, &'a [::Receipt], usize); - /// Resumable cached state plus execution metadata for the cached prefix. -pub(crate) type ResumableStateWithExecutionMeta<'a, N> = +pub(crate) type ResumableState<'a, N> = (&'a BundleState, &'a [::Receipt], &'a Requests, u64, u64, usize); /// Cache of transaction execution results for a single block. @@ -202,8 +196,8 @@ impl TransactionCache { /// Returns cached state for resuming execution if the incoming transactions /// have a matching prefix with the cache. /// - /// Returns `Some((bundle, receipts, skip_count))` if there's a non-empty matching - /// prefix, where: + /// Returns `Some((bundle, receipts, requests, gas_used, blob_gas_used, skip_count))` if + /// there's a non-empty matching prefix, where: /// - `bundle` is the cumulative state after the matching prefix /// - `receipts` is the receipts for the matching prefix /// - `skip_count` is the number of transactions to skip @@ -212,25 +206,11 @@ impl TransactionCache { /// - The cache is empty /// - No prefix matches (first transaction differs) /// - Block number doesn't match - pub fn get_resumable_state( + pub(crate) fn get_resumable_state( &self, block_number: u64, tx_hashes: &[B256], ) -> Option> { - self.get_resumable_state_with_execution_meta(block_number, tx_hashes) - .map(|(bundle, receipts, .., skip_count)| (bundle, receipts, skip_count)) - } - - /// Returns cached state and execution metadata for resuming execution if the incoming - /// transactions have a matching prefix with the cache. - /// - /// Returns `Some((bundle, receipts, requests, gas_used, blob_gas_used, skip_count))` if - /// there's a non-empty matching prefix and the entire cache matches the incoming prefix. - pub(crate) fn get_resumable_state_with_execution_meta( - &self, - block_number: u64, - tx_hashes: &[B256], - ) -> Option> { if !self.is_valid_for_block(block_number) || self.is_empty() { return None; } @@ -264,7 +244,7 @@ impl TransactionCache { block_number: u64, parent_hash: B256, tx_hashes: &[B256], - ) -> Option> { + ) -> Option> { if !self.is_valid_for_block_parent(block_number, parent_hash) || self.is_empty() { return None; } @@ -422,13 +402,13 @@ mod tests { // Exact match returns state let result = cache.get_resumable_state(100, &[tx_a, tx_b]); assert!(result.is_some()); - let (_, _, skip) = result.unwrap(); + let (_, _, _, _, _, skip) = result.unwrap(); assert_eq!(skip, 2); // Continuation returns state (can skip cached txs) let result = cache.get_resumable_state(100, &[tx_a, tx_b, tx_c]); assert!(result.is_some()); - let (_, _, skip) = result.unwrap(); + let (_, _, _, _, _, skip) = result.unwrap(); assert_eq!(skip, 2); // Partial match (reorg) returns None - can't use partial cache @@ -459,7 +439,7 @@ mod tests { let fb1_txs = vec![tx_a, tx_b, tx_c]; let result = cache.get_resumable_state(100, &fb1_txs); assert!(result.is_some()); - let (bundle, receipts, skip) = result.unwrap(); + let (bundle, receipts, _, _, _, skip) = result.unwrap(); // skip=2 indicates 2 txs are covered by cached state (for logging) // Note: All transactions are still executed, skip is informational only @@ -514,7 +494,7 @@ mod tests { let fb1_txs = vec![tx_a, tx_b]; let result = cache.get_resumable_state(100, &fb1_txs); assert!(result.is_some()); - assert_eq!(result.unwrap().2, 1); // 1 tx covered by cache + assert_eq!(result.unwrap().5, 1); // 1 tx covered by cache cache.update(100, fb1_txs, BundleState::default(), vec![]); assert_eq!(cache.len(), 2); @@ -523,7 +503,7 @@ mod tests { let fb2_txs = vec![tx_a, tx_b, tx_c]; let result = cache.get_resumable_state(100, &fb2_txs); assert!(result.is_some()); - assert_eq!(result.unwrap().2, 2); // 2 txs covered by cache + assert_eq!(result.unwrap().5, 2); // 2 txs covered by cache cache.update(100, fb2_txs, BundleState::default(), vec![]); assert_eq!(cache.len(), 3); @@ -583,7 +563,7 @@ mod tests { // get_resumable_state returns skip=2 for prefix [A, B] let result = cache.get_resumable_state(100, &[tx_a, tx_b, tx_c]); assert!(result.is_some()); - let (bundle, _receipts, skip_count) = result.unwrap(); + let (bundle, _receipts, _, _, _, skip_count) = result.unwrap(); // skip_count indicates cached prefix length assert_eq!(skip_count, 2); @@ -624,7 +604,7 @@ mod tests { let tx_c = B256::repeat_byte(0xCC); let result = cache.get_resumable_state(100, &[tx_a, tx_b, tx_c]); assert!(result.is_some()); - let (_, receipts, _) = result.unwrap(); + let (_, receipts, _, _, _, _) = result.unwrap(); assert_eq!(receipts.len(), 2); } @@ -651,7 +631,7 @@ mod tests { }, ); - let resumable = cache.get_resumable_state_with_execution_meta(100, &[tx_a, tx_b, tx_c]); + let resumable = cache.get_resumable_state(100, &[tx_a, tx_b, tx_c]); assert!(resumable.is_some()); let (_, _, cached_requests, gas_used, blob_gas_used, skip_count) = resumable.unwrap(); assert_eq!(skip_count, 2); diff --git a/crates/flashblocks/src/execution/mod.rs b/crates/flashblocks/src/execution/mod.rs new file mode 100644 index 00000000..4b0e11bd --- /dev/null +++ b/crates/flashblocks/src/execution/mod.rs @@ -0,0 +1,4 @@ +mod cache; +pub(crate) mod worker; + +pub use worker::FlashblockCachedReceipt; diff --git a/crates/flashblocks/src/worker.rs b/crates/flashblocks/src/execution/worker.rs similarity index 86% rename from crates/flashblocks/src/worker.rs rename to crates/flashblocks/src/execution/worker.rs index 957f0333..32877e09 100644 --- a/crates/flashblocks/src/worker.rs +++ b/crates/flashblocks/src/execution/worker.rs @@ -1,11 +1,18 @@ use crate::{ - pending_state::PendingBlockState, - tx_cache::{CachedExecutionMeta, TransactionCache}, + execution::cache::{CachedExecutionMeta, TransactionCache}, + types::pending_state::PendingBlockState, PendingFlashBlock, }; +use std::{ + sync::Arc, + time::{Duration, Instant}, +}; +use tracing::trace; + use alloy_eips::{eip2718::WithEncoded, BlockNumberOrTag}; use alloy_primitives::B256; use op_alloy_rpc_types_engine::OpFlashblockPayloadBase; + use reth_chain_state::{ComputedTrieData, ExecutedBlock}; use reth_errors::RethError; use reth_evm::{ @@ -30,27 +37,56 @@ use reth_storage_api::{ noop::NoopProvider, BlockReaderIdExt, HashedPostStateProvider, StateProviderFactory, StateRootProvider, }; -use std::{ - sync::Arc, - time::{Duration, Instant}, -}; -use tracing::trace; /// The `FlashBlockBuilder` builds [`PendingBlock`] out of a sequence of transactions. +/// +/// Owns a [`TransactionCache`] for incremental prefix caching between flashblock builds. #[derive(Debug)] -pub(crate) struct FlashBlockBuilder { +pub(crate) struct FlashBlockBuilder { evm_config: EvmConfig, provider: Provider, + tx_cache: TransactionCache, } -impl FlashBlockBuilder { - pub(crate) const fn new(evm_config: EvmConfig, provider: Provider) -> Self { - Self { evm_config, provider } +impl FlashBlockBuilder { + pub(crate) fn new(evm_config: EvmConfig, provider: Provider) -> Self { + Self { evm_config, provider, tx_cache: TransactionCache::new() } } pub(crate) const fn provider(&self) -> &Provider { &self.provider } + + /// Clears the transaction cache (used on reorg/catch-up). + pub(crate) fn clear_cache(&mut self) { + self.tx_cache.clear(); + } + + /// Resets the transaction cache to a fresh empty state. + pub(crate) fn reset_cache(&mut self) { + self.tx_cache = TransactionCache::new(); + } +} + +impl + FlashBlockBuilder +{ + /// Clones the builder config and moves the transaction cache into the new + /// builder, leaving `self` with an empty cache. + /// + /// Used before spawning a blocking build task. + pub(crate) fn fork_with_cache(&mut self) -> Self { + Self { + evm_config: self.evm_config.clone(), + provider: self.provider.clone(), + tx_cache: std::mem::take(&mut self.tx_cache), + } + } + + /// Restores the transaction cache from a completed forked builder. + pub(crate) fn merge_cache(&mut self, other: Self) { + self.tx_cache = other.tx_cache; + } } pub(crate) struct BuildArgs { @@ -111,7 +147,7 @@ impl FlashblockCachedReceipt for OpReceipt { } } -impl FlashBlockBuilder +impl FlashBlockBuilder where N: NodePrimitives, N::Receipt: FlashblockCachedReceipt, @@ -131,17 +167,16 @@ where /// 1. **Canonical mode**: Parent matches local tip - uses state from storage /// 2. **Speculative mode**: Parent is a pending block - uses pending state /// - /// When a `tx_cache` is provided and we're in canonical mode, the builder will - /// attempt to resume from cached state if the transaction list is a continuation - /// of what was previously executed. + /// In canonical mode, the internal transaction cache is used to resume from + /// cached state if the transaction list is a continuation of what was previously + /// executed. /// /// Returns `None` if: /// - In canonical mode: flashblock doesn't attach to the latest header /// - In speculative mode: no pending parent state provided pub(crate) fn execute>>>( - &self, + &mut self, mut args: BuildArgs, - tx_cache: Option<&mut TransactionCache>, ) -> eyre::Result>> { trace!(target: "flashblocks", "Attempting new pending block from flashblocks"); @@ -233,38 +268,27 @@ where // Check for resumable canonical execution state. let canonical_parent_hash = args.base.parent_hash; let cached_prefix = if is_canonical { - tx_cache.as_ref().and_then(|cache| { - cache - .get_resumable_state_with_execution_meta_for_parent( - args.base.block_number, - canonical_parent_hash, - &tx_hashes, - ) - .map( - |( - bundle, - receipts, - _requests, - gas_used, - blob_gas_used, - cached_tx_count, - )| { - trace!( - target: "flashblocks", - cached_tx_count, - total_txs = tx_hashes.len(), - "Cache hit (executing only uncached suffix)" - ); - CachedPrefixExecutionResult { - cached_tx_count, - bundle: bundle.clone(), - receipts: receipts.to_vec(), - gas_used, - blob_gas_used, - } - }, - ) - }) + self.tx_cache + .get_resumable_state_with_execution_meta_for_parent( + args.base.block_number, + canonical_parent_hash, + &tx_hashes, + ) + .map(|(bundle, receipts, _requests, gas_used, blob_gas_used, cached_tx_count)| { + trace!( + target: "flashblocks", + cached_tx_count, + total_txs = tx_hashes.len(), + "Cache hit (executing only uncached suffix)" + ); + CachedPrefixExecutionResult { + cached_tx_count, + bundle: bundle.clone(), + receipts: receipts.to_vec(), + gas_used, + blob_gas_used, + } + }) } else { None }; @@ -388,11 +412,9 @@ where (execution_result, block, hashed_state, bundle) }; - // Update transaction cache if provided (only in canonical mode) - if let Some(cache) = tx_cache - && is_canonical - { - cache.update_with_execution_meta_for_parent( + // Update internal transaction cache (only in canonical mode) + if is_canonical { + self.tx_cache.update_with_execution_meta_for_parent( args.base.block_number, canonical_parent_hash, tx_hashes, @@ -488,16 +510,10 @@ fn is_consistent_speculative_parent_hashes( incoming_parent_hash == pending_block_hash && pending_block_hash == pending_sealed_hash } -impl Clone for FlashBlockBuilder { - fn clone(&self) -> Self { - Self { evm_config: self.evm_config.clone(), provider: self.provider.clone() } - } -} - #[cfg(test)] mod tests { use super::{is_consistent_speculative_parent_hashes, BuildArgs, FlashBlockBuilder}; - use crate::{tx_cache::CachedExecutionMeta, TransactionCache}; + use crate::execution::cache::CachedExecutionMeta; use alloy_consensus::{SignableTransaction, TxEip1559}; use alloy_eips::eip2718::Encodable2718; use alloy_network::TxSignerSync; @@ -618,29 +634,26 @@ mod tests { let tx_c = into_encoded_recovered(tx_c, signer); let evm_config = OpEvmConfig::optimism(OP_MAINNET.clone()); - let builder = FlashBlockBuilder::new(evm_config, provider); - let mut tx_cache = TransactionCache::::new(); + let mut builder = FlashBlockBuilder::::new(evm_config, provider); let first = builder - .execute( - BuildArgs { - base: base.clone(), - transactions: vec![tx_a.clone(), tx_b.clone()], - cached_state: None, - last_flashblock_index: 0, - last_flashblock_hash: B256::repeat_byte(0xA0), - compute_state_root: false, - pending_parent: None, - }, - Some(&mut tx_cache), - ) + .execute(BuildArgs { + base: base.clone(), + transactions: vec![tx_a.clone(), tx_b.clone()], + cached_state: None, + last_flashblock_index: 0, + last_flashblock_hash: B256::repeat_byte(0xA0), + compute_state_root: false, + pending_parent: None, + }) .expect("first build succeeds") .expect("first build is canonical"); assert_eq!(first.pending_state.execution_outcome.result.receipts.len(), 2); let cached_hashes = vec![tx_a_hash, tx_b_hash]; - let (bundle, receipts, requests, gas_used, blob_gas_used, skip) = tx_cache + let (bundle, receipts, requests, gas_used, blob_gas_used, skip) = builder + .tx_cache .get_resumable_state_with_execution_meta_for_parent( base.block_number, base_parent_hash, @@ -654,7 +667,7 @@ mod tests { tampered_receipts[0].as_receipt().cumulative_gas_used.saturating_add(17); let expected_tampered_gas = tampered_receipts[0].as_receipt().cumulative_gas_used; - tx_cache.update_with_execution_meta_for_parent( + builder.tx_cache.update_with_execution_meta_for_parent( base.block_number, base_parent_hash, cached_hashes, @@ -664,7 +677,8 @@ mod tests { ); let second_hashes = vec![tx_a_hash, tx_b_hash, tx_c_hash]; - let (_, _, _, _, _, skip) = tx_cache + let (_, _, _, _, _, skip) = builder + .tx_cache .get_resumable_state_with_execution_meta_for_parent( base.block_number, base_parent_hash, @@ -674,18 +688,15 @@ mod tests { assert_eq!(skip, 2); let second = builder - .execute( - BuildArgs { - base, - transactions: vec![tx_a, tx_b, tx_c], - cached_state: None, - last_flashblock_index: 1, - last_flashblock_hash: B256::repeat_byte(0xA1), - compute_state_root: false, - pending_parent: None, - }, - Some(&mut tx_cache), - ) + .execute(BuildArgs { + base, + transactions: vec![tx_a, tx_b, tx_c], + cached_state: None, + last_flashblock_index: 1, + last_flashblock_hash: B256::repeat_byte(0xA1), + compute_state_root: false, + pending_parent: None, + }) .expect("second build succeeds") .expect("second build is canonical"); diff --git a/crates/flashblocks/src/handler.rs b/crates/flashblocks/src/handle.rs similarity index 100% rename from crates/flashblocks/src/handler.rs rename to crates/flashblocks/src/handle.rs diff --git a/crates/flashblocks/src/lib.rs b/crates/flashblocks/src/lib.rs index 271b1973..21700718 100644 --- a/crates/flashblocks/src/lib.rs +++ b/crates/flashblocks/src/lib.rs @@ -1,27 +1,18 @@ //! X-Layer flashblocks crate. -pub mod handler; -pub mod pubsub; +pub mod execution; +pub mod handle; pub mod subscription; +pub use execution::FlashblockCachedReceipt; + use reth_primitives_traits::NodePrimitives; use std::sync::Arc; -// Included to enable serde feature for `OpReceipt` type used transitively -use reth_optimism_primitives as _; - -// Used by downstream crates -use alloy_rpc_types_eth as _; - -mod consensus; -pub use consensus::FlashBlockConsensusClient; - -mod payload; -pub use payload::{FlashBlock, PendingFlashBlock}; - -mod sequence; -pub use sequence::{ - FlashBlockCompleteSequence, FlashBlockPendingSequence, SequenceExecutionOutcome, +pub mod types; +pub use types::{ + FlashBlock, FlashBlockCompleteSequence, FlashBlockPendingSequence, PendingBlockState, + PendingFlashBlock, PendingStateRegistry, SequenceExecutionOutcome, }; mod service; @@ -30,19 +21,10 @@ pub use service::{ FlashBlockService, }; -mod worker; -pub use worker::FlashblockCachedReceipt; - mod cache; -mod pending_state; -pub use pending_state::{PendingBlockState, PendingStateRegistry}; - pub mod validation; -mod tx_cache; -pub use tx_cache::TransactionCache; - #[cfg(test)] mod test_utils; diff --git a/crates/flashblocks/src/service.rs b/crates/flashblocks/src/service.rs index fe9e0da5..dc08a609 100644 --- a/crates/flashblocks/src/service.rs +++ b/crates/flashblocks/src/service.rs @@ -1,9 +1,8 @@ use crate::{ cache::{BuildApplyOutcome, BuildTicket, SequenceManager}, - pending_state::PendingStateRegistry, - tx_cache::TransactionCache, + execution::worker::{BuildResult, FlashBlockBuilder, FlashblockCachedReceipt}, + types::pending_state::PendingStateRegistry, validation::{CanonicalBlockFingerprint, ReconciliationStrategy}, - worker::{BuildResult, FlashBlockBuilder, FlashblockCachedReceipt}, FlashBlock, FlashBlockCompleteSequence, FlashBlockCompleteSequenceRx, InProgressFlashBlockRx, PendingFlashBlock, }; @@ -67,17 +66,15 @@ pub struct FlashBlockService< received_flashblocks_tx: tokio::sync::broadcast::Sender>, /// Executes flashblock sequences to build pending blocks. - builder: FlashBlockBuilder, + builder: FlashBlockBuilder, /// Task executor for spawning block build jobs. spawner: TaskExecutor, /// Currently running block build job with start time and result receiver. - job: Option>, + job: Option>, /// Manages flashblock sequences with caching and intelligent build selection. sequences: SequenceManager, /// Registry for pending block states to enable speculative building. pending_states: PendingStateRegistry, - /// Transaction execution cache for incremental flashblock building. - tx_cache: TransactionCache, /// Epoch counter for state invalidation. /// @@ -130,7 +127,6 @@ where job: None, sequences: SequenceManager::new(compute_state_root), pending_states: PendingStateRegistry::new(), - tx_cache: TransactionCache::new(), state_epoch: 0, max_depth: DEFAULT_MAX_DEPTH, metrics: FlashBlockServiceMetrics::default(), @@ -209,13 +205,13 @@ where let _ = self.in_progress_tx.send(None); // Handle channel error (task panicked or was cancelled) - let Some(Ok((result, returned_cache))) = job_result else { + let Some(Ok((result, returned_builder))) = job_result else { warn!( target: "flashblocks", "Build job channel closed unexpectedly (task may have panicked)" ); // Re-initialize transaction cache since we lost the one sent to the task - self.tx_cache = TransactionCache::new(); + self.builder.reset_cache(); self.schedule_followup_build(); continue; }; @@ -238,7 +234,7 @@ where } // Restore the transaction cache from the spawned task (only if epoch matched) - self.tx_cache = returned_cache; + self.builder.merge_cache(returned_builder); match result { Ok(Some(build_result)) => { @@ -383,7 +379,7 @@ where | ReconciliationStrategy::DepthLimitExceeded { .. } ) { self.pending_states.clear(); - self.tx_cache.clear(); + self.builder.clear_cache(); self.state_epoch = self.state_epoch.wrapping_add(1); trace!( target: "flashblocks", @@ -452,14 +448,11 @@ where self.metrics.current_index.set(fb_info.index as f64); let _ = self.in_progress_tx.send(Some(fb_info)); - // Take ownership of the transaction cache for the spawned task - let mut tx_cache = std::mem::take(&mut self.tx_cache); - let (result_tx, result_rx) = oneshot::channel(); - let builder = self.builder.clone(); + let mut builder = self.builder.fork_with_cache(); self.spawner.spawn_blocking(Box::pin(async move { - let result = builder.execute(args, Some(&mut tx_cache)); - let _ = result_tx.send((result, tx_cache)); + let result = builder.execute(args); + let _ = result_tx.send((result, builder)); })); self.job = Some(BuildJob { start_time: Instant::now(), @@ -484,7 +477,7 @@ pub struct FlashBlockBuildInfo { /// A running build job with metadata for tracking and invalidation. #[derive(Debug)] -struct BuildJob { +struct BuildJob { /// When the job was started. start_time: Instant, /// The state epoch when this job was started. @@ -494,9 +487,12 @@ struct BuildJob { epoch: u64, /// Opaque ticket identifying the exact sequence snapshot targeted by this build job. ticket: BuildTicket, - /// Receiver for the build result and returned transaction cache. + /// Receiver for the build result and returned builder (with updated cache). #[allow(clippy::type_complexity)] - result_rx: oneshot::Receiver<(eyre::Result>>, TransactionCache)>, + result_rx: oneshot::Receiver<( + eyre::Result>>, + FlashBlockBuilder, + )>, } /// Creates a bounded channel for canonical block notifications. diff --git a/crates/flashblocks/src/subscription/mod.rs b/crates/flashblocks/src/subscription/mod.rs new file mode 100644 index 00000000..d9fc5e85 --- /dev/null +++ b/crates/flashblocks/src/subscription/mod.rs @@ -0,0 +1,4 @@ +pub mod pubsub; +mod rpc; + +pub use rpc::{FlashblocksPubSub, FlashblocksPubSubApiServer}; diff --git a/crates/flashblocks/src/pubsub.rs b/crates/flashblocks/src/subscription/pubsub.rs similarity index 99% rename from crates/flashblocks/src/pubsub.rs rename to crates/flashblocks/src/subscription/pubsub.rs index 5f16c47f..d82c3c92 100644 --- a/crates/flashblocks/src/pubsub.rs +++ b/crates/flashblocks/src/subscription/pubsub.rs @@ -1,12 +1,14 @@ +use jsonrpsee::types::ErrorObject; +use serde::{Deserialize, Serialize}; +use std::collections::HashSet; + use alloy_primitives::{Address, TxHash}; use alloy_rpc_types_eth::{ pubsub::{Params as AlloyParams, SubscriptionKind as AlloySubscriptionKind}, Header, }; -use jsonrpsee::types::ErrorObject; + use reth_rpc_server_types::result::invalid_params_rpc_err; -use serde::{Deserialize, Serialize}; -use std::collections::HashSet; const FLASHBLOCKS: &str = "flashblocks"; diff --git a/crates/flashblocks/src/subscription.rs b/crates/flashblocks/src/subscription/rpc.rs similarity index 99% rename from crates/flashblocks/src/subscription.rs rename to crates/flashblocks/src/subscription/rpc.rs index f033c2c4..ec11bb98 100644 --- a/crates/flashblocks/src/subscription.rs +++ b/crates/flashblocks/src/subscription/rpc.rs @@ -1,11 +1,8 @@ -use crate::pubsub::{ +use super::pubsub::{ EnrichedTransaction, FlashblockParams, FlashblockStreamEvent, FlashblockSubscriptionKind, FlashblocksFilter, }; -use alloy_consensus::{transaction::TxHashRef, BlockHeader as _, Transaction as _, TxReceipt as _}; -use alloy_json_rpc::RpcObject; -use alloy_primitives::{Address, TxHash, U256}; -use alloy_rpc_types_eth::{Header, TransactionInfo}; + use futures::StreamExt; use jsonrpsee::{ proc_macros::rpc, server::SubscriptionMessage, types::ErrorObject, PendingSubscriptionSink, @@ -13,6 +10,14 @@ use jsonrpsee::{ }; use moka::policy::EvictionPolicy; use moka::sync::Cache; +use std::{collections::HashSet, future::ready, sync::Arc}; +use tokio_stream::{wrappers::WatchStream, Stream}; + +use alloy_consensus::{transaction::TxHashRef, BlockHeader as _, Transaction as _, TxReceipt as _}; +use alloy_json_rpc::RpcObject; +use alloy_primitives::{Address, TxHash, U256}; +use alloy_rpc_types_eth::{Header, TransactionInfo}; + use reth_optimism_flashblocks::{PendingBlockRx, PendingFlashBlock}; use reth_primitives_traits::{ NodePrimitives, Recovered, RecoveredBlock, SealedBlock, TransactionMeta, @@ -25,8 +30,6 @@ use reth_rpc_server_types::result::{internal_rpc_err, invalid_params_rpc_err}; use reth_storage_api::BlockNumReader; use reth_tasks::TaskSpawner; use reth_tracing::tracing::{trace, warn}; -use std::{collections::HashSet, future::ready, sync::Arc}; -use tokio_stream::{wrappers::WatchStream, Stream}; const MAX_TXHASH_CACHE_SIZE: u64 = 10_000; diff --git a/crates/flashblocks/src/types/mod.rs b/crates/flashblocks/src/types/mod.rs new file mode 100644 index 00000000..3104bd55 --- /dev/null +++ b/crates/flashblocks/src/types/mod.rs @@ -0,0 +1,9 @@ +pub(crate) mod payload; +pub(crate) mod pending_state; +pub(crate) mod sequence; + +pub use payload::{FlashBlock, PendingFlashBlock}; +pub use pending_state::{PendingBlockState, PendingStateRegistry}; +pub use sequence::{ + FlashBlockCompleteSequence, FlashBlockPendingSequence, SequenceExecutionOutcome, +}; diff --git a/crates/flashblocks/src/payload.rs b/crates/flashblocks/src/types/payload.rs similarity index 100% rename from crates/flashblocks/src/payload.rs rename to crates/flashblocks/src/types/payload.rs diff --git a/crates/flashblocks/src/pending_state.rs b/crates/flashblocks/src/types/pending_state.rs similarity index 100% rename from crates/flashblocks/src/pending_state.rs rename to crates/flashblocks/src/types/pending_state.rs diff --git a/crates/flashblocks/src/sequence.rs b/crates/flashblocks/src/types/sequence.rs similarity index 100% rename from crates/flashblocks/src/sequence.rs rename to crates/flashblocks/src/types/sequence.rs From beed97bf6d7a4d08be9aa5728c393083e244e8c0 Mon Sep 17 00:00:00 2001 From: Niven Date: Mon, 9 Mar 2026 15:49:47 +0800 Subject: [PATCH 02/76] style(flashblocks): fix import ordering in cache module MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude Sonnet 4.6 --- crates/flashblocks/src/cache.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/flashblocks/src/cache.rs b/crates/flashblocks/src/cache.rs index b8129d92..50956021 100644 --- a/crates/flashblocks/src/cache.rs +++ b/crates/flashblocks/src/cache.rs @@ -4,13 +4,13 @@ //! and intelligently selects which sequence to build based on the local chain tip. use crate::{ + execution::worker::BuildArgs, types::pending_state::PendingBlockState, types::sequence::{FlashBlockPendingSequence, SequenceExecutionOutcome}, validation::{ CanonicalBlockFingerprint, CanonicalBlockReconciler, ReconciliationStrategy, ReorgDetector, TrackedBlockFingerprint, }, - execution::worker::BuildArgs, FlashBlock, FlashBlockCompleteSequence, PendingFlashBlock, }; use alloy_eips::eip2718::WithEncoded; From 8c4ca8262135eff11ec91cf7dfeafe9d20163e79 Mon Sep 17 00:00:00 2001 From: Niven Date: Mon, 9 Mar 2026 18:11:50 +0800 Subject: [PATCH 03/76] feat(flashblocks-rpc): add state cache with pending and confirm caches MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude Opus 4.6 --- Cargo.lock | 2 + crates/flashblocks/Cargo.toml | 2 + crates/flashblocks/src/cache/confirm.rs | 147 +++++ crates/flashblocks/src/cache/mod.rs | 4 + .../{types/payload.rs => cache/pending.rs} | 50 +- .../src/{cache.rs => cache/raw.rs} | 0 crates/flashblocks/src/cache/state.rs | 97 ++++ crates/flashblocks/src/lib.rs | 68 +-- crates/flashblocks/src/service.rs | 539 ------------------ crates/flashblocks/src/types/mod.rs | 4 - crates/flashblocks/src/types/pending_state.rs | 388 ------------- crates/flashblocks/src/types/sequence.rs | 48 +- 12 files changed, 290 insertions(+), 1059 deletions(-) create mode 100644 crates/flashblocks/src/cache/confirm.rs create mode 100644 crates/flashblocks/src/cache/mod.rs rename crates/flashblocks/src/{types/payload.rs => cache/pending.rs} (96%) rename crates/flashblocks/src/{cache.rs => cache/raw.rs} (100%) create mode 100644 crates/flashblocks/src/cache/state.rs delete mode 100644 crates/flashblocks/src/service.rs delete mode 100644 crates/flashblocks/src/types/pending_state.rs diff --git a/Cargo.lock b/Cargo.lock index f571932b..9b26481f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -14262,6 +14262,7 @@ dependencies = [ "op-alloy-consensus", "op-alloy-rpc-types-engine", "op-revm", + "parking_lot", "reth-chain-state", "reth-errors", "reth-evm", @@ -14288,6 +14289,7 @@ dependencies = [ "serde", "serde_json", "test-case", + "thiserror 1.0.69", "tokio", "tokio-stream", "tokio-tungstenite", diff --git a/crates/flashblocks/Cargo.toml b/crates/flashblocks/Cargo.toml index 7bbecfd0..30cf9e2a 100644 --- a/crates/flashblocks/Cargo.toml +++ b/crates/flashblocks/Cargo.toml @@ -62,6 +62,8 @@ async-trait.workspace = true brotli = { workspace = true, features = ["std"] } derive_more.workspace = true eyre.workspace = true +parking_lot.workspace = true +thiserror.workspace = true tracing.workspace = true metrics.workspace = true moka.workspace = true diff --git a/crates/flashblocks/src/cache/confirm.rs b/crates/flashblocks/src/cache/confirm.rs new file mode 100644 index 00000000..30b0f599 --- /dev/null +++ b/crates/flashblocks/src/cache/confirm.rs @@ -0,0 +1,147 @@ +use std::collections::{BTreeMap, HashMap}; + +use alloy_primitives::B256; +use eyre::eyre; +use reth_primitives_traits::NodePrimitives; +use reth_rpc_eth_types::block::BlockAndReceipts; + +const DEFAULT_CONFIRM_CACHE_SIZE: usize = 5_000; + +/// Confirmed flashblocks sequence cache that is ahead of the current +/// canonical chain. We optimistically commit confirmed flashblocks sequences to +/// the cache and flush them when the canonical chain catches up. +/// +/// Block data is stored in a `BTreeMap` keyed by block number, enabling O(log n) +/// range splits in [`flush_up_to`](Self::flush_up_to). A secondary `HashMap` +/// provides O(1) block hash to block number reverse lookups. +#[derive(Debug)] +pub struct ConfirmCache { + /// Primary storage: block number → (block hash, block + receipts). + /// `BTreeMap` ordering enables efficient range-based flush via `split_off`. + blocks: BTreeMap)>, + /// Reverse index: block hash → block number for O(1) hash-based lookups. + hash_to_number: HashMap, +} + +impl Default for ConfirmCache { + fn default() -> Self { + Self::new() + } +} + +impl ConfirmCache { + /// Creates a new [`ConfirmCache`]. + pub fn new() -> Self { + Self { blocks: BTreeMap::new(), hash_to_number: HashMap::new() } + } + + /// Returns the number of cached entries. + pub fn len(&self) -> usize { + self.blocks.len() + } + + /// Returns `true` if the cache is empty. + pub fn is_empty(&self) -> bool { + self.blocks.is_empty() + } + + /// Inserts a confirmed block into the cache, indexed by both block number + /// and block hash. + /// + /// This is a raw insert with no reorg detection — callers are responsible + /// for flushing invalidated entries via [`flush_from`](Self::flush_from) + /// before inserting if a reorg is detected. + /// + /// Returns an error if the cache is at max capacity. + pub fn insert( + &mut self, + height: u64, + hash: B256, + block: BlockAndReceipts, + ) -> eyre::Result<()> { + if self.blocks.len() >= DEFAULT_CONFIRM_CACHE_SIZE { + return Err(eyre!( + "confirm cache at max capacity ({DEFAULT_CONFIRM_CACHE_SIZE}), cannot insert block: {height}" + )); + } + self.hash_to_number.insert(hash, height); + self.blocks.insert(height, (hash, block)); + Ok(()) + } + + /// Returns the confirmed block for the given block hash, if present. + pub fn get_by_hash(&self, block_hash: &B256) -> Option> { + let number = self.hash_to_number.get(block_hash)?; + self.blocks.get(number).map(|(_, block)| block.clone()) + } + + /// Returns the confirmed block for the given block number, if present. + pub fn get_by_number(&self, block_number: u64) -> Option> { + self.blocks.get(&block_number).map(|(_, block)| block.clone()) + } + + /// Returns the block hash for the given block number, if cached. + pub fn hash_for_number(&self, block_number: u64) -> Option { + self.blocks.get(&block_number).map(|(hash, _)| *hash) + } + + /// Returns `true` if the cache contains a block with the given hash. + pub fn contains_hash(&self, block_hash: &B256) -> bool { + self.hash_to_number.contains_key(block_hash) + } + + /// Returns `true` if the cache contains a block with the given number. + pub fn contains_number(&self, block_number: u64) -> bool { + self.blocks.contains_key(&block_number) + } + + /// Removes and returns the confirmed block for the given block number. + pub fn remove_by_number(&mut self, block_number: u64) -> Option> { + let (hash, block) = self.blocks.remove(&block_number)?; + self.hash_to_number.remove(&hash); + Some(block) + } + + /// Removes and returns the confirmed block for the given block hash. + pub fn remove_by_hash(&mut self, block_hash: &B256) -> Option> { + let number = self.hash_to_number.remove(block_hash)?; + self.blocks.remove(&number).map(|(_, block)| block) + } + + /// Flushes all entries with block number >= `from` (the reorged range). + /// Returns the number of entries flushed. + pub fn flush_from(&mut self, from: u64) -> usize { + let reorged = self.blocks.split_off(&from); + let count = reorged.len(); + for (hash, _) in reorged.into_values() { + self.hash_to_number.remove(&hash); + } + count + } + + /// Flushes all entries with block number <= `canonical_number`. + /// + /// Called when the canonical chain catches up to the confirmed cache. + /// Returns the number of entries flushed. + pub fn flush_up_to(&mut self, canonical_number: u64) -> usize { + let retained = self.blocks.split_off(&(canonical_number + 1)); + let stale = std::mem::replace(&mut self.blocks, retained); + + let count = stale.len(); + for (hash, _) in stale.into_values() { + self.hash_to_number.remove(&hash); + } + count + } + + /// Returns the highest cached block number, or `None` if empty. + pub fn latest_block_number(&self) -> Option { + self.blocks.keys().next_back().copied() + } + + /// Clears all entries. + pub fn clear(&mut self) { + self.blocks.clear(); + self.hash_to_number.clear(); + } +} diff --git a/crates/flashblocks/src/cache/mod.rs b/crates/flashblocks/src/cache/mod.rs new file mode 100644 index 00000000..5b8e5a1b --- /dev/null +++ b/crates/flashblocks/src/cache/mod.rs @@ -0,0 +1,4 @@ +pub(crate) mod confirm; +pub(crate) mod pending; +pub(crate) mod raw; +pub(crate) mod state; diff --git a/crates/flashblocks/src/types/payload.rs b/crates/flashblocks/src/cache/pending.rs similarity index 96% rename from crates/flashblocks/src/types/payload.rs rename to crates/flashblocks/src/cache/pending.rs index 503e8409..a1d92546 100644 --- a/crates/flashblocks/src/types/payload.rs +++ b/crates/flashblocks/src/cache/pending.rs @@ -1,46 +1,46 @@ +use derive_more::Deref; + use alloy_consensus::BlockHeader; use alloy_primitives::B256; -use derive_more::Deref; use reth_primitives_traits::NodePrimitives; +use reth_revm::cached::CachedReads; use reth_rpc_eth_types::PendingBlock; -/// Type alias for the Optimism flashblock payload. -pub type FlashBlock = op_alloy_rpc_types_engine::OpFlashblockPayload; - -/// The pending block built with all received Flashblocks alongside the metadata for the last added -/// Flashblock. +/// The pending flashblocks sequence built with all received OpFlashblockPayload +/// alongside the metadata for the last added flashblock. #[derive(Debug, Clone, Deref)] -pub struct PendingFlashBlock { - /// The complete pending block built out of all received Flashblocks. +pub struct PendingSequence { + /// Locally built full pending block of the latest flashblocks sequence. #[deref] pub pending: PendingBlock, - /// Canonical anchor hash used for state lookups when this block was built. - /// - /// For canonical builds this equals `pending.block().parent_hash()`. - /// For speculative builds this points to the canonical ancestor used for storage reads. - pub canonical_anchor_hash: B256, - /// A sequential index that identifies the last Flashblock added to this block. + /// The current block hash of the latest flashblocks sequence. + pub block_hash: B256, + /// Parent hash of the built block (may be non-canonical or canonical). + pub parent_hash: B256, + /// The last flashblock index of the latest flashblocks sequence. pub last_flashblock_index: u64, - /// The last Flashblock block hash, - pub last_flashblock_hash: B256, - /// Whether the [`PendingBlock`] has a properly computed stateroot. + /// Cached reads from execution for reuse. + pub cached_reads: CachedReads, + /// Whether the [`PendingFlashblockSequence`] has a properly computed stateroot. pub has_computed_state_root: bool, } -impl PendingFlashBlock { +impl PendingSequence { /// Create new pending flashblock. pub const fn new( pending: PendingBlock, - canonical_anchor_hash: B256, + block_hash: B256, + parent_hash: B256, last_flashblock_index: u64, - last_flashblock_hash: B256, + cached_reads: CachedReads, has_computed_state_root: bool, ) -> Self { Self { pending, - canonical_anchor_hash, + block_hash, + parent_hash, last_flashblock_index, - last_flashblock_hash, + cached_reads, has_computed_state_root, } } @@ -53,7 +53,7 @@ impl PendingFlashBlock { #[cfg(test)] mod tests { - use super::*; + use op_alloy_rpc_types_engine::OpFlashblockPayload; #[test] fn test_flashblock_serde_roundtrip() { @@ -246,9 +246,9 @@ mod tests { "payload_id": "0x0316ecb1aa1671b5" }"#; - let flashblock: FlashBlock = serde_json::from_str(raw).expect("deserialize"); + let flashblock: OpFlashblockPayload = serde_json::from_str(raw).expect("deserialize"); let serialized = serde_json::to_string(&flashblock).expect("serialize"); - let roundtrip: FlashBlock = serde_json::from_str(&serialized).expect("roundtrip"); + let roundtrip: OpFlashblockPayload = serde_json::from_str(&serialized).expect("roundtrip"); assert_eq!(flashblock, roundtrip); } diff --git a/crates/flashblocks/src/cache.rs b/crates/flashblocks/src/cache/raw.rs similarity index 100% rename from crates/flashblocks/src/cache.rs rename to crates/flashblocks/src/cache/raw.rs diff --git a/crates/flashblocks/src/cache/state.rs b/crates/flashblocks/src/cache/state.rs new file mode 100644 index 00000000..5868bc4d --- /dev/null +++ b/crates/flashblocks/src/cache/state.rs @@ -0,0 +1,97 @@ +use std::sync::Arc; + +use alloy_primitives::B256; +use parking_lot::RwLock; +use reth_primitives_traits::NodePrimitives; +use reth_rpc_eth_types::block::BlockAndReceipts; + +use super::{confirm::ConfirmCache, pending::PendingSequence}; + +/// Top-level controller state cache for the flashblocks RPC layer. +/// +/// Composed of: +/// - **Pending**: the in-progress flashblock sequence being built from incoming +/// `OpFlashblockPayload` deltas (at most one active sequence at a time). +/// - **Confirmed**: completed flashblock sequences that have been committed but +/// are still ahead of the canonical chain. +/// +/// Uses `Arc` for thread safety — a single lock protects all inner +/// state, ensuring atomic operations across pending, confirmed, and height +/// state (e.g. reorg detection + flush + insert in `handle_confirmed_block`). +#[derive(Debug, Clone)] +pub struct StateCache { + inner: Arc>>, +} + +impl StateCache { + /// Creates a new [`StateCache`]. + pub fn new(canon_height: u64) -> Self { + Self { inner: Arc::new(RwLock::new(StateCacheInner::new(canon_height))) } + } + + /// Handles a newly confirmed block by detecting reorgs, flushing invalidated + /// entries, and inserting into the confirm blocks cache. + pub fn handle_confirmed_block( + &self, + block_number: u64, + block_hash: B256, + block: BlockAndReceipts, + ) -> eyre::Result<()> { + self.inner.write().handle_confirmed_block(block_number, block_hash, block) + } + + /// Returns the current confirmed cache height, if any blocks have been confirmed. + pub fn get_confirm_height(&self) -> Option { + self.inner.read().confirm_height + } +} + +/// Inner state of the flashblocks state cache. +#[derive(Debug)] +struct StateCacheInner { + /// The current in-progress pending flashblock sequence, if any. + pending: Option>, + /// Cache of confirmed flashblock sequences ahead of the canonical chain. + confirm_cache: ConfirmCache, + /// The highest confirmed block height. + confirm_height: Option, + /// The highest canonical block height. + canon_height: u64, +} + +impl StateCacheInner { + fn new(canon_height: u64) -> Self { + Self { + pending: None, + confirm_cache: ConfirmCache::new(), + confirm_height: None, + canon_height, + } + } + + /// Handles a newly confirmed block with reorg detection. + fn handle_confirmed_block( + &mut self, + block_number: u64, + block_hash: B256, + block: BlockAndReceipts, + ) -> eyre::Result<()> { + if let Some(confirm_height) = self.confirm_height { + // Reorg detection: incoming block is at or behind the last confirmed height. + if block_number <= confirm_height { + self.confirm_cache.flush_from(block_number); + } + } + + self.confirm_cache.insert(block_number, block_hash, block)?; + + // Sanity check: the inserted block must now be the highest in the cache + self.confirm_height = Some(block_number); + if self.confirm_height != self.confirm_cache.latest_block_number() { + return Err(eyre::eyre!( + "confirmed cache latest height mismatch inserted block height: {block_number}" + )); + } + Ok(()) + } +} diff --git a/crates/flashblocks/src/lib.rs b/crates/flashblocks/src/lib.rs index 21700718..f08bf0e0 100644 --- a/crates/flashblocks/src/lib.rs +++ b/crates/flashblocks/src/lib.rs @@ -1,72 +1,18 @@ //! X-Layer flashblocks crate. -pub mod execution; +pub mod cache; +mod execution; pub mod handle; pub mod subscription; +pub mod types; +mod ws; -pub use execution::FlashblockCachedReceipt; - -use reth_primitives_traits::NodePrimitives; -use std::sync::Arc; +#[cfg(test)] +mod test_utils; -pub mod types; +pub use execution::FlashblockCachedReceipt; pub use types::{ FlashBlock, FlashBlockCompleteSequence, FlashBlockPendingSequence, PendingBlockState, PendingFlashBlock, PendingStateRegistry, SequenceExecutionOutcome, }; - -mod service; -pub use service::{ - create_canonical_block_channel, CanonicalBlockNotification, FlashBlockBuildInfo, - FlashBlockService, -}; - -mod cache; - -pub mod validation; - -#[cfg(test)] -mod test_utils; - -mod ws; pub use ws::{FlashBlockDecoder, WsConnect, WsFlashBlockStream}; - -/// Receiver of the most recent [`PendingFlashBlock`] built out of [`FlashBlock`]s. -pub type PendingBlockRx = tokio::sync::watch::Receiver>>; - -/// Receiver of the sequences of [`FlashBlock`]s built. -pub type FlashBlockCompleteSequenceRx = - tokio::sync::broadcast::Receiver; - -/// Receiver of received [`FlashBlock`]s from the (websocket) subscription. -pub type FlashBlockRx = tokio::sync::broadcast::Receiver>; - -/// Receiver that signals whether a [`FlashBlock`] is currently being built. -pub type InProgressFlashBlockRx = tokio::sync::watch::Receiver>; - -/// Container for all flashblocks-related listeners. -/// -/// Groups together the channels for flashblock-related updates. -#[derive(Debug)] -pub struct FlashblocksListeners { - /// Receiver of the most recent executed [`PendingFlashBlock`] built out of [`FlashBlock`]s. - pub pending_block_rx: PendingBlockRx, - /// Subscription channel of the complete sequences of [`FlashBlock`]s built. - pub flashblocks_sequence: tokio::sync::broadcast::Sender, - /// Receiver that signals whether a [`FlashBlock`] is currently being built. - pub in_progress_rx: InProgressFlashBlockRx, - /// Subscription channel for received flashblocks from the (websocket) connection. - pub received_flashblocks: tokio::sync::broadcast::Sender>, -} - -impl FlashblocksListeners { - /// Creates a new [`FlashblocksListeners`] with the given channels. - pub const fn new( - pending_block_rx: PendingBlockRx, - flashblocks_sequence: tokio::sync::broadcast::Sender, - in_progress_rx: InProgressFlashBlockRx, - received_flashblocks: tokio::sync::broadcast::Sender>, - ) -> Self { - Self { pending_block_rx, flashblocks_sequence, in_progress_rx, received_flashblocks } - } -} diff --git a/crates/flashblocks/src/service.rs b/crates/flashblocks/src/service.rs deleted file mode 100644 index dc08a609..00000000 --- a/crates/flashblocks/src/service.rs +++ /dev/null @@ -1,539 +0,0 @@ -use crate::{ - cache::{BuildApplyOutcome, BuildTicket, SequenceManager}, - execution::worker::{BuildResult, FlashBlockBuilder, FlashblockCachedReceipt}, - types::pending_state::PendingStateRegistry, - validation::{CanonicalBlockFingerprint, ReconciliationStrategy}, - FlashBlock, FlashBlockCompleteSequence, FlashBlockCompleteSequenceRx, InProgressFlashBlockRx, - PendingFlashBlock, -}; -use alloy_primitives::B256; -use futures_util::{FutureExt, Stream, StreamExt}; -use metrics::{Counter, Gauge, Histogram}; -use op_alloy_rpc_types_engine::OpFlashblockPayloadBase; -use reth_evm::ConfigureEvm; -use reth_metrics::Metrics; -use reth_primitives_traits::{AlloyBlockHeader, BlockTy, HeaderTy, NodePrimitives, ReceiptTy}; -use reth_storage_api::{BlockReaderIdExt, StateProviderFactory}; -use reth_tasks::TaskExecutor; -use std::{ - sync::Arc, - time::{Duration, Instant}, -}; -use tokio::{ - sync::{mpsc, oneshot, watch}, - time::sleep, -}; -use tracing::*; - -const CONNECTION_BACKOUT_PERIOD: Duration = Duration::from_secs(5); - -/// Default maximum depth for pending blocks ahead of canonical. -const DEFAULT_MAX_DEPTH: u64 = 64; - -/// Capacity for the canonical block notification channel. -/// This bounds memory usage while allowing for some buffering during catch-up. -const CANONICAL_BLOCK_CHANNEL_CAPACITY: usize = 128; - -/// Notification about a new canonical block for reconciliation. -#[derive(Debug, Clone)] -pub struct CanonicalBlockNotification { - /// The canonical block number. - pub block_number: u64, - /// Canonical block hash. - pub block_hash: B256, - /// Canonical parent hash. - pub parent_hash: B256, - /// Transaction hashes in the canonical block. - pub tx_hashes: Vec, -} - -/// The `FlashBlockService` maintains an in-memory [`PendingFlashBlock`] built out of a sequence of -/// [`FlashBlock`]s. -#[derive(Debug)] -pub struct FlashBlockService< - N: NodePrimitives, - S, - EvmConfig: ConfigureEvm + Unpin>, - Provider, -> { - /// Incoming flashblock stream. - incoming_flashblock_rx: S, - /// Receiver for canonical block notifications (bounded to prevent OOM). - canonical_block_rx: Option>, - /// Signals when a block build is in progress. - in_progress_tx: watch::Sender>, - /// Broadcast channel to forward received flashblocks from the subscription. - received_flashblocks_tx: tokio::sync::broadcast::Sender>, - - /// Executes flashblock sequences to build pending blocks. - builder: FlashBlockBuilder, - /// Task executor for spawning block build jobs. - spawner: TaskExecutor, - /// Currently running block build job with start time and result receiver. - job: Option>, - /// Manages flashblock sequences with caching and intelligent build selection. - sequences: SequenceManager, - /// Registry for pending block states to enable speculative building. - pending_states: PendingStateRegistry, - - /// Epoch counter for state invalidation. - /// - /// Incremented whenever speculative state is cleared (reorg, catch-up, depth limit). - /// Used to detect and discard stale build results from in-flight jobs that were - /// started before the state was invalidated. - state_epoch: u64, - - /// Maximum depth for pending blocks ahead of canonical before clearing. - max_depth: u64, - /// `FlashBlock` service's metrics - metrics: FlashBlockServiceMetrics, -} - -impl FlashBlockService -where - N: NodePrimitives, - N::Receipt: FlashblockCachedReceipt, - S: Stream> + Unpin + 'static, - EvmConfig: ConfigureEvm + Unpin> - + Clone - + 'static, - Provider: StateProviderFactory - + BlockReaderIdExt< - Header = HeaderTy, - Block = BlockTy, - Transaction = N::SignedTx, - Receipt = ReceiptTy, - > + Unpin - + Clone - + 'static, -{ - /// Constructs a new `FlashBlockService` that receives [`FlashBlock`]s from `rx` stream. - pub fn new( - incoming_flashblock_rx: S, - evm_config: EvmConfig, - provider: Provider, - spawner: TaskExecutor, - compute_state_root: bool, - ) -> Self { - let (in_progress_tx, _) = watch::channel(None); - let (received_flashblocks_tx, _) = tokio::sync::broadcast::channel(128); - Self { - incoming_flashblock_rx, - canonical_block_rx: None, - in_progress_tx, - received_flashblocks_tx, - builder: FlashBlockBuilder::new(evm_config, provider), - spawner, - job: None, - sequences: SequenceManager::new(compute_state_root), - pending_states: PendingStateRegistry::new(), - state_epoch: 0, - max_depth: DEFAULT_MAX_DEPTH, - metrics: FlashBlockServiceMetrics::default(), - } - } - - /// Sets the canonical block receiver for reconciliation. - /// - /// When canonical blocks are received, the service will reconcile the pending - /// flashblock state to handle catch-up and reorg scenarios. - /// - /// The channel should be bounded to prevent unbounded memory growth. Use - /// [`create_canonical_block_channel`] to create a properly sized channel. - pub fn with_canonical_block_rx( - mut self, - rx: mpsc::Receiver, - ) -> Self { - self.canonical_block_rx = Some(rx); - self - } - - /// Sets the maximum depth for pending blocks ahead of canonical. - /// - /// If pending blocks get too far ahead of the canonical chain, the pending - /// state will be cleared to prevent unbounded memory growth. - pub const fn with_max_depth(mut self, max_depth: u64) -> Self { - self.max_depth = max_depth; - self - } - - /// Returns the sender half for the received flashblocks broadcast channel. - pub const fn flashblocks_broadcaster( - &self, - ) -> &tokio::sync::broadcast::Sender> { - &self.received_flashblocks_tx - } - - /// Returns the sender half for the flashblock sequence broadcast channel. - pub const fn block_sequence_broadcaster( - &self, - ) -> &tokio::sync::broadcast::Sender { - self.sequences.block_sequence_broadcaster() - } - - /// Returns a subscriber to the flashblock sequence. - pub fn subscribe_block_sequence(&self) -> FlashBlockCompleteSequenceRx { - self.sequences.subscribe_block_sequence() - } - - /// Returns a receiver that signals when a flashblock is being built. - pub fn subscribe_in_progress(&self) -> InProgressFlashBlockRx { - self.in_progress_tx.subscribe() - } - - /// Drives the service and sends new blocks to the receiver. - /// - /// This loop: - /// 1. Checks if any build job has completed and processes results - /// 2. Receives and batches all immediately available flashblocks - /// 3. Processes canonical block notifications for reconciliation - /// 4. Attempts to build a block from the complete sequence - /// - /// Note: this should be spawned - pub async fn run(mut self, tx: watch::Sender>>) { - loop { - tokio::select! { - // Event 1: job exists, listen to job results - // Handle both successful results and channel errors (e.g., task panic) - job_result = async { - match self.job.as_mut() { - Some(job) => Some((&mut job.result_rx).await), - None => std::future::pending().await, - } - } => { - let job = self.job.take().unwrap(); - let _ = self.in_progress_tx.send(None); - - // Handle channel error (task panicked or was cancelled) - let Some(Ok((result, returned_builder))) = job_result else { - warn!( - target: "flashblocks", - "Build job channel closed unexpectedly (task may have panicked)" - ); - // Re-initialize transaction cache since we lost the one sent to the task - self.builder.reset_cache(); - self.schedule_followup_build(); - continue; - }; - - // Check if the state epoch has changed since this job started. - // If so, the speculative state has been invalidated (e.g., by a reorg) - // and we should discard the build result AND the returned cache to avoid - // reintroducing stale state that was cleared during reconciliation. - if job.epoch != self.state_epoch { - trace!( - target: "flashblocks", - job_epoch = job.epoch, - current_epoch = self.state_epoch, - "Discarding stale build result and cache (state was invalidated)" - ); - self.metrics.stale_builds_discarded.increment(1); - // Don't restore the returned cache - keep the cleared cache from reconciliation - self.schedule_followup_build(); - continue; - } - - // Restore the transaction cache from the spawned task (only if epoch matched) - self.builder.merge_cache(returned_builder); - - match result { - Ok(Some(build_result)) => { - let pending = build_result.pending_flashblock; - let apply_outcome = self.sequences - .on_build_complete(job.ticket, Some((pending.clone(), build_result.cached_reads))); - - if apply_outcome.is_applied() { - // Record pending state for speculative building of subsequent blocks - self.pending_states.record_build(build_result.pending_state); - - let elapsed = job.start_time.elapsed(); - self.metrics.execution_duration.record(elapsed.as_secs_f64()); - - let _ = tx.send(Some(pending)); - } else { - match apply_outcome { - BuildApplyOutcome::RejectedPendingSequenceMismatch { .. } => { - self.metrics - .build_reject_pending_sequence_mismatch - .increment(1); - } - BuildApplyOutcome::RejectedPendingRevisionStale { .. } => { - self.metrics - .build_reject_pending_revision_stale - .increment(1); - } - BuildApplyOutcome::RejectedCachedSequenceMissing { .. } => { - self.metrics - .build_reject_cached_sequence_missing - .increment(1); - } - BuildApplyOutcome::SkippedNoBuildResult => { - self.metrics - .build_reject_missing_build_result - .increment(1); - } - BuildApplyOutcome::AppliedPending - | BuildApplyOutcome::AppliedCached { .. } => {} - } - trace!( - target: "flashblocks", - ?apply_outcome, - "Discarding build side effects due to rejected completion apply" - ); - } - } - Ok(None) => { - trace!(target: "flashblocks", "Build job returned None"); - } - Err(err) => { - warn!(target: "flashblocks", %err, "Build job failed"); - } - } - - // Drain runnable work after each completion instead of waiting for another - // external event. - self.schedule_followup_build(); - } - - // Event 2: New flashblock arrives (batch process all ready flashblocks) - result = self.incoming_flashblock_rx.next() => { - match result { - Some(Ok(flashblock)) => { - // Process first flashblock - self.process_flashblock(flashblock); - - // Batch process all other immediately available flashblocks - while let Some(result) = self.incoming_flashblock_rx.next().now_or_never().flatten() { - match result { - Ok(fb) => self.process_flashblock(fb), - Err(err) => warn!(target: "flashblocks", %err, "Error receiving flashblock"), - } - } - - self.try_start_build_job(); - } - Some(Err(err)) => { - warn!( - target: "flashblocks", - %err, - retry_period = CONNECTION_BACKOUT_PERIOD.as_secs(), - "Error receiving flashblock" - ); - sleep(CONNECTION_BACKOUT_PERIOD).await; - } - None => { - warn!(target: "flashblocks", "Flashblock stream ended"); - break; - } - } - } - - // Event 3: Canonical block notification for reconciliation - Some(notification) = async { - match self.canonical_block_rx.as_mut() { - Some(rx) => rx.recv().await, - None => std::future::pending().await, - } - } => { - self.process_canonical_block(notification); - // Try to build after reconciliation in case we can now build - self.try_start_build_job(); - } - } - } - } - - /// Attempts to start the next build after a completion and records outcome metrics. - fn schedule_followup_build(&mut self) { - self.metrics.drain_followup_attempts.increment(1); - if self.try_start_build_job() { - self.metrics.drain_followup_started.increment(1); - } else { - self.metrics.drain_followup_noop.increment(1); - } - } - - /// Processes a canonical block notification and reconciles pending state. - fn process_canonical_block(&mut self, notification: CanonicalBlockNotification) { - let canonical_fingerprint = CanonicalBlockFingerprint { - block_number: notification.block_number, - block_hash: notification.block_hash, - parent_hash: notification.parent_hash, - tx_hashes: notification.tx_hashes, - }; - - let strategy = - self.sequences.process_canonical_block(canonical_fingerprint, self.max_depth); - - // Record metrics based on strategy - if matches!(strategy, ReconciliationStrategy::HandleReorg) { - self.metrics.reorg_count.increment(1); - } - - // Clear pending states and transaction cache for strategies that invalidate speculative - // state. Also increment the state epoch to invalidate any in-flight build jobs. - if matches!( - strategy, - ReconciliationStrategy::HandleReorg - | ReconciliationStrategy::CatchUp - | ReconciliationStrategy::DepthLimitExceeded { .. } - ) { - self.pending_states.clear(); - self.builder.clear_cache(); - self.state_epoch = self.state_epoch.wrapping_add(1); - trace!( - target: "flashblocks", - new_epoch = self.state_epoch, - ?strategy, - "State invalidated, incremented epoch" - ); - } - } - - /// Processes a single flashblock: notifies subscribers, records metrics, and inserts into - /// sequence. - fn process_flashblock(&mut self, flashblock: FlashBlock) { - self.notify_received_flashblock(&flashblock); - - if flashblock.index == 0 { - self.metrics.last_flashblock_length.record(self.sequences.pending().count() as f64); - } - - if let Err(err) = self.sequences.insert_flashblock(flashblock) { - trace!(target: "flashblocks", %err, "Failed to insert flashblock"); - } - } - - /// Notifies all subscribers about the received flashblock. - fn notify_received_flashblock(&self, flashblock: &FlashBlock) { - if self.received_flashblocks_tx.receiver_count() > 0 { - let _ = self.received_flashblocks_tx.send(Arc::new(flashblock.clone())); - } - } - - /// Attempts to build a block if no job is currently running and a buildable sequence exists. - fn try_start_build_job(&mut self) -> bool { - if self.job.is_some() { - return false; // Already building - } - - let Some(latest) = self.builder.provider().latest_header().ok().flatten() else { - return false; - }; - - // Prefer parent-hash-specific speculative context for the current pending sequence. - // Fall back to the latest speculative state when no exact parent match is found. - let pending_parent = self - .sequences - .pending() - .payload_base() - .and_then(|base| self.pending_states.get_state_for_parent(base.parent_hash).cloned()) - .or_else(|| self.pending_states.current().cloned()); - - let Some(candidate) = - self.sequences.next_buildable_args(latest.hash(), latest.timestamp(), pending_parent) - else { - return false; // Nothing buildable - }; - let ticket = candidate.ticket; - let args = candidate.args; - - // Spawn build job - let fb_info = FlashBlockBuildInfo { - parent_hash: args.base.parent_hash, - index: args.last_flashblock_index, - block_number: args.base.block_number, - }; - self.metrics.current_block_height.set(fb_info.block_number as f64); - self.metrics.current_index.set(fb_info.index as f64); - let _ = self.in_progress_tx.send(Some(fb_info)); - - let (result_tx, result_rx) = oneshot::channel(); - let mut builder = self.builder.fork_with_cache(); - self.spawner.spawn_blocking(Box::pin(async move { - let result = builder.execute(args); - let _ = result_tx.send((result, builder)); - })); - self.job = Some(BuildJob { - start_time: Instant::now(), - epoch: self.state_epoch, - ticket, - result_rx, - }); - true - } -} - -/// Information for a flashblock currently built -#[derive(Debug, Clone, Copy)] -pub struct FlashBlockBuildInfo { - /// Parent block hash - pub parent_hash: B256, - /// Flashblock index within the current block's sequence - pub index: u64, - /// Block number of the flashblock being built. - pub block_number: u64, -} - -/// A running build job with metadata for tracking and invalidation. -#[derive(Debug)] -struct BuildJob { - /// When the job was started. - start_time: Instant, - /// The state epoch when this job was started. - /// - /// If the service's `state_epoch` has changed by the time this job completes, - /// the result should be discarded as the speculative state has been invalidated. - epoch: u64, - /// Opaque ticket identifying the exact sequence snapshot targeted by this build job. - ticket: BuildTicket, - /// Receiver for the build result and returned builder (with updated cache). - #[allow(clippy::type_complexity)] - result_rx: oneshot::Receiver<( - eyre::Result>>, - FlashBlockBuilder, - )>, -} - -/// Creates a bounded channel for canonical block notifications. -/// -/// This returns a sender/receiver pair with a bounded capacity to prevent -/// unbounded memory growth. If the receiver falls behind, senders will -/// block until space is available. -/// -/// Returns `(sender, receiver)` tuple for use with [`FlashBlockService::with_canonical_block_rx`]. -pub fn create_canonical_block_channel( -) -> (mpsc::Sender, mpsc::Receiver) { - mpsc::channel(CANONICAL_BLOCK_CHANNEL_CAPACITY) -} - -#[derive(Metrics)] -#[metrics(scope = "flashblock_service")] -struct FlashBlockServiceMetrics { - /// The last complete length of flashblocks per block. - last_flashblock_length: Histogram, - /// The duration applying flashblock state changes in seconds. - execution_duration: Histogram, - /// Current block height. - current_block_height: Gauge, - /// Current flashblock index. - current_index: Gauge, - /// Number of reorgs detected during canonical block reconciliation. - reorg_count: Counter, - /// Number of build results discarded due to state invalidation (reorg during build). - stale_builds_discarded: Counter, - /// Number of completions rejected because pending sequence identity no longer matched. - build_reject_pending_sequence_mismatch: Counter, - /// Number of completions rejected because pending revision no longer matched. - build_reject_pending_revision_stale: Counter, - /// Number of completions rejected because referenced cached sequence was missing. - build_reject_cached_sequence_missing: Counter, - /// Number of completions skipped due to missing build result payload. - build_reject_missing_build_result: Counter, - /// Number of follow-up drain scheduling attempts after build completion. - drain_followup_attempts: Counter, - /// Number of follow-up attempts that successfully started another build. - drain_followup_started: Counter, - /// Number of follow-up attempts where no buildable work was available. - drain_followup_noop: Counter, -} diff --git a/crates/flashblocks/src/types/mod.rs b/crates/flashblocks/src/types/mod.rs index 3104bd55..3f6c63cc 100644 --- a/crates/flashblocks/src/types/mod.rs +++ b/crates/flashblocks/src/types/mod.rs @@ -1,9 +1,5 @@ -pub(crate) mod payload; -pub(crate) mod pending_state; pub(crate) mod sequence; -pub use payload::{FlashBlock, PendingFlashBlock}; -pub use pending_state::{PendingBlockState, PendingStateRegistry}; pub use sequence::{ FlashBlockCompleteSequence, FlashBlockPendingSequence, SequenceExecutionOutcome, }; diff --git a/crates/flashblocks/src/types/pending_state.rs b/crates/flashblocks/src/types/pending_state.rs deleted file mode 100644 index 6c367658..00000000 --- a/crates/flashblocks/src/types/pending_state.rs +++ /dev/null @@ -1,388 +0,0 @@ -//! Pending block state for speculative flashblock building. -//! -//! This module provides types for tracking execution state from flashblock builds, -//! enabling speculative building of subsequent blocks before their parent canonical -//! block arrives via P2P. - -use alloy_primitives::B256; -use reth_execution_types::BlockExecutionOutput; -use reth_primitives_traits::{HeaderTy, NodePrimitives, SealedHeader}; -use reth_revm::cached::CachedReads; -use std::{ - collections::{HashMap, VecDeque}, - sync::Arc, -}; - -/// Tracks the execution state from building a pending block. -/// -/// This is used to enable speculative building of subsequent blocks: -/// - When flashblocks for block N+1 arrive before canonical block N -/// - The pending state from building block N's flashblocks can be used -/// - This allows continuous flashblock processing without waiting for P2P -#[derive(Debug, Clone)] -pub struct PendingBlockState { - /// Locally computed block hash for this built block. - /// - /// This hash is used to match subsequent flashblock sequences by `parent_hash` - /// during speculative chaining. - pub block_hash: B256, - /// Block number that was built. - pub block_number: u64, - /// Parent hash of the built block (may be non-canonical for speculative builds). - pub parent_hash: B256, - /// Canonical anchor hash for state lookups. - /// - /// This is the hash used for `history_by_block_hash` when loading state. - /// For canonical builds, this equals `parent_hash`. - /// For speculative builds, this is the canonical block hash that the chain - /// of speculative builds is rooted at (forwarded from parent's anchor). - pub canonical_anchor_hash: B256, - /// Execution outcome containing state changes. - pub execution_outcome: Arc>, - /// Cached reads from execution for reuse. - pub cached_reads: CachedReads, - /// Sealed header for this built block. - /// - /// Used as the parent header for speculative child builds. - pub sealed_header: Option>>, -} - -impl PendingBlockState { - /// Creates a new pending block state. - pub const fn new( - block_hash: B256, - block_number: u64, - parent_hash: B256, - canonical_anchor_hash: B256, - execution_outcome: Arc>, - cached_reads: CachedReads, - ) -> Self { - Self { - block_hash, - block_number, - parent_hash, - canonical_anchor_hash, - execution_outcome, - cached_reads, - sealed_header: None, - } - } - - /// Attaches a sealed header for use as parent context in speculative builds. - pub fn with_sealed_header(mut self, sealed_header: SealedHeader>) -> Self { - self.sealed_header = Some(sealed_header); - self - } -} - -/// Registry of pending block states for speculative building. -/// -/// Maintains a small cache of recently built pending blocks, allowing -/// subsequent flashblock sequences to build on top of them even before -/// the canonical blocks arrive. -#[derive(Debug)] -pub struct PendingStateRegistry { - /// Executed pending states keyed by locally computed block hash. - by_block_hash: HashMap>, - /// Insertion order for bounded eviction. - insertion_order: VecDeque, - /// Most recently recorded block hash. - latest_block_hash: Option, - /// Maximum number of tracked pending states. - max_entries: usize, -} - -impl PendingStateRegistry { - const DEFAULT_MAX_ENTRIES: usize = 64; - - /// Creates a new pending state registry. - pub fn new() -> Self { - Self::with_max_entries(Self::DEFAULT_MAX_ENTRIES) - } - - /// Creates a new pending state registry with an explicit entry bound. - pub fn with_max_entries(max_entries: usize) -> Self { - let max_entries = max_entries.max(1); - Self { - by_block_hash: HashMap::with_capacity(max_entries), - insertion_order: VecDeque::with_capacity(max_entries), - latest_block_hash: None, - max_entries, - } - } - - /// Records a completed build's state for potential use by subsequent builds. - pub fn record_build(&mut self, state: PendingBlockState) { - let block_hash = state.block_hash; - - if self.by_block_hash.contains_key(&block_hash) { - self.insertion_order.retain(|hash| *hash != block_hash); - } - - self.by_block_hash.insert(block_hash, state); - self.insertion_order.push_back(block_hash); - self.latest_block_hash = Some(block_hash); - - while self.by_block_hash.len() > self.max_entries { - let Some(evicted_hash) = self.insertion_order.pop_front() else { - break; - }; - self.by_block_hash.remove(&evicted_hash); - if self.latest_block_hash == Some(evicted_hash) { - self.latest_block_hash = self.insertion_order.back().copied(); - } - } - } - - /// Gets the pending state for a given parent hash, if available. - /// - /// Returns `Some` if we have pending state whose `block_hash` matches the requested - /// `parent_hash`. - pub fn get_state_for_parent(&self, parent_hash: B256) -> Option<&PendingBlockState> { - self.by_block_hash.get(&parent_hash) - } - - /// Clears all pending state. - pub fn clear(&mut self) { - self.by_block_hash.clear(); - self.insertion_order.clear(); - self.latest_block_hash = None; - } - - /// Returns the current pending state, if any. - pub fn current(&self) -> Option<&PendingBlockState> { - self.latest_block_hash.and_then(|hash| self.by_block_hash.get(&hash)) - } -} - -impl Default for PendingStateRegistry { - fn default() -> Self { - Self::new() - } -} - -#[cfg(test)] -mod tests { - use super::*; - use reth_optimism_primitives::OpPrimitives; - - type TestRegistry = PendingStateRegistry; - - #[test] - fn test_registry_returns_state_for_matching_parent() { - let mut registry = TestRegistry::new(); - - let block_hash = B256::repeat_byte(1); - let parent_hash = B256::repeat_byte(0); - let state = PendingBlockState { - block_hash, - block_number: 100, - parent_hash, - canonical_anchor_hash: parent_hash, - execution_outcome: Arc::new(BlockExecutionOutput::default()), - cached_reads: CachedReads::default(), - sealed_header: None, - }; - registry.record_build(state); - - // Should find state when querying with matching block_hash as parent - let result = registry.get_state_for_parent(block_hash); - assert!(result.is_some()); - assert_eq!(result.unwrap().block_number, 100); - } - - #[test] - fn test_registry_returns_none_for_wrong_parent() { - let mut registry = TestRegistry::new(); - - let parent_hash = B256::repeat_byte(0); - let state = PendingBlockState { - block_hash: B256::repeat_byte(1), - block_number: 100, - parent_hash, - canonical_anchor_hash: parent_hash, - execution_outcome: Arc::new(BlockExecutionOutput::default()), - cached_reads: CachedReads::default(), - sealed_header: None, - }; - registry.record_build(state); - - // Different parent hash should return None - assert!(registry.get_state_for_parent(B256::repeat_byte(2)).is_none()); - } - - #[test] - fn test_registry_clear() { - let mut registry = TestRegistry::new(); - - let parent_hash = B256::repeat_byte(0); - let state = PendingBlockState { - block_hash: B256::repeat_byte(1), - block_number: 100, - parent_hash, - canonical_anchor_hash: parent_hash, - execution_outcome: Arc::new(BlockExecutionOutput::default()), - cached_reads: CachedReads::default(), - sealed_header: None, - }; - registry.record_build(state); - assert!(registry.current().is_some()); - - registry.clear(); - assert!(registry.current().is_none()); - } - - #[test] - fn test_registry_tracks_multiple_states_by_hash() { - let mut registry = TestRegistry::new(); - - let anchor = B256::repeat_byte(0); - let state_100 = PendingBlockState { - block_hash: B256::repeat_byte(1), - block_number: 100, - parent_hash: anchor, - canonical_anchor_hash: anchor, - execution_outcome: Arc::new(BlockExecutionOutput::default()), - cached_reads: CachedReads::default(), - sealed_header: None, - }; - let state_101 = PendingBlockState { - block_hash: B256::repeat_byte(2), - block_number: 101, - parent_hash: state_100.block_hash, - canonical_anchor_hash: anchor, - execution_outcome: Arc::new(BlockExecutionOutput::default()), - cached_reads: CachedReads::default(), - sealed_header: None, - }; - - registry.record_build(state_100.clone()); - registry.record_build(state_101.clone()); - - assert_eq!(registry.current().map(|s| s.block_number), Some(101)); - assert_eq!( - registry.get_state_for_parent(state_100.block_hash).map(|s| s.block_number), - Some(100) - ); - assert_eq!( - registry.get_state_for_parent(state_101.block_hash).map(|s| s.block_number), - Some(101) - ); - } - - #[test] - fn test_registry_eviction_respects_max_entries() { - let mut registry = PendingStateRegistry::::with_max_entries(2); - let anchor = B256::repeat_byte(0); - - let state_100 = PendingBlockState { - block_hash: B256::repeat_byte(1), - block_number: 100, - parent_hash: anchor, - canonical_anchor_hash: anchor, - execution_outcome: Arc::new(BlockExecutionOutput::default()), - cached_reads: CachedReads::default(), - sealed_header: None, - }; - let state_101 = PendingBlockState { - block_hash: B256::repeat_byte(2), - block_number: 101, - parent_hash: state_100.block_hash, - canonical_anchor_hash: anchor, - execution_outcome: Arc::new(BlockExecutionOutput::default()), - cached_reads: CachedReads::default(), - sealed_header: None, - }; - let state_102 = PendingBlockState { - block_hash: B256::repeat_byte(3), - block_number: 102, - parent_hash: state_101.block_hash, - canonical_anchor_hash: anchor, - execution_outcome: Arc::new(BlockExecutionOutput::default()), - cached_reads: CachedReads::default(), - sealed_header: None, - }; - - registry.record_build(state_100); - registry.record_build(state_101.clone()); - registry.record_build(state_102.clone()); - - assert!(registry.get_state_for_parent(B256::repeat_byte(1)).is_none()); - assert_eq!( - registry.get_state_for_parent(state_101.block_hash).map(|s| s.block_number), - Some(101) - ); - assert_eq!( - registry.get_state_for_parent(state_102.block_hash).map(|s| s.block_number), - Some(102) - ); - assert_eq!(registry.current().map(|s| s.block_number), Some(102)); - } - - /// Tests that `canonical_anchor_hash` is distinct from `parent_hash` in speculative chains. - /// - /// When building speculatively: - /// - Block N (canonical): `parent_hash` = N-1, `canonical_anchor` = N-1 (same) - /// - Block N+1 (speculative): `parent_hash` = N, `canonical_anchor` = N-1 (forwarded) - /// - Block N+2 (speculative): `parent_hash` = N+1, `canonical_anchor` = N-1 (still forwarded) - /// - /// The `canonical_anchor_hash` always points to the last canonical block used for - /// `history_by_block_hash` lookups. - #[test] - fn test_canonical_anchor_forwarding_semantics() { - // Canonical block N-1 (the anchor for speculative chain) - let canonical_anchor = B256::repeat_byte(0x00); - - // Block N built on canonical - anchor equals parent - let block_n_hash = B256::repeat_byte(0x01); - let state_n = PendingBlockState:: { - block_hash: block_n_hash, - block_number: 100, - parent_hash: canonical_anchor, - canonical_anchor_hash: canonical_anchor, // Same as parent for canonical build - execution_outcome: Arc::new(BlockExecutionOutput::default()), - cached_reads: CachedReads::default(), - sealed_header: None, - }; - - // Verify block N's anchor is the canonical block - assert_eq!(state_n.canonical_anchor_hash, canonical_anchor); - assert_eq!(state_n.parent_hash, state_n.canonical_anchor_hash); - - // Block N+1 built speculatively on N - anchor is FORWARDED from N - let block_n1_hash = B256::repeat_byte(0x02); - let state_n1 = PendingBlockState:: { - block_hash: block_n1_hash, - block_number: 101, - parent_hash: block_n_hash, // Parent is block N - canonical_anchor_hash: state_n.canonical_anchor_hash, // Forwarded from N - execution_outcome: Arc::new(BlockExecutionOutput::default()), - cached_reads: CachedReads::default(), - sealed_header: None, - }; - - // Verify N+1's anchor is still the canonical block, NOT block N - assert_eq!(state_n1.canonical_anchor_hash, canonical_anchor); - assert_ne!(state_n1.parent_hash, state_n1.canonical_anchor_hash); - - // Block N+2 built speculatively on N+1 - anchor still forwarded - let block_n2_hash = B256::repeat_byte(0x03); - let state_n2 = PendingBlockState:: { - block_hash: block_n2_hash, - block_number: 102, - parent_hash: block_n1_hash, // Parent is block N+1 - canonical_anchor_hash: state_n1.canonical_anchor_hash, // Forwarded from N+1 - execution_outcome: Arc::new(BlockExecutionOutput::default()), - cached_reads: CachedReads::default(), - sealed_header: None, - }; - - // Verify N+2's anchor is STILL the original canonical block - assert_eq!(state_n2.canonical_anchor_hash, canonical_anchor); - assert_ne!(state_n2.parent_hash, state_n2.canonical_anchor_hash); - - // All three blocks should have the same canonical anchor - assert_eq!(state_n.canonical_anchor_hash, state_n1.canonical_anchor_hash); - assert_eq!(state_n1.canonical_anchor_hash, state_n2.canonical_anchor_hash); - } -} diff --git a/crates/flashblocks/src/types/sequence.rs b/crates/flashblocks/src/types/sequence.rs index dabbb94d..35f54cc4 100644 --- a/crates/flashblocks/src/types/sequence.rs +++ b/crates/flashblocks/src/types/sequence.rs @@ -1,16 +1,13 @@ -use crate::{FlashBlock, FlashBlockCompleteSequenceRx}; -use alloy_primitives::{Bytes, B256}; -use alloy_rpc_types_engine::PayloadId; +use crate::FlashBlock; use core::mem; use eyre::{bail, OptionExt}; -use op_alloy_rpc_types_engine::OpFlashblockPayloadBase; -use reth_revm::cached::CachedReads; use std::{collections::BTreeMap, ops::Deref}; -use tokio::sync::broadcast; use tracing::*; -/// The size of the broadcast channel for completed flashblock sequences. -const FLASHBLOCK_SEQUENCE_CHANNEL_SIZE: usize = 128; +use alloy_primitives::{Bytes, B256}; +use alloy_rpc_types_engine::PayloadId; +use op_alloy_rpc_types_engine::OpFlashblockPayloadBase; +use reth_revm::cached::CachedReads; #[derive(Debug, Clone, Copy, PartialEq, Eq)] enum FollowupRejectionReason { @@ -40,12 +37,10 @@ pub struct SequenceExecutionOutcome { } /// An ordered B-tree keeping the track of a sequence of [`FlashBlock`]s by their indices. -#[derive(Debug)] +#[derive(Debug, Default)] pub struct FlashBlockPendingSequence { /// tracks the individual flashblocks in order inner: BTreeMap, - /// Broadcasts flashblocks to subscribers. - block_broadcaster: broadcast::Sender, /// Optional execution outcome from building the current sequence. execution_outcome: Option, /// Cached state reads for the current block. @@ -56,31 +51,6 @@ pub struct FlashBlockPendingSequence { } impl FlashBlockPendingSequence { - /// Create a new pending sequence. - pub fn new() -> Self { - // Note: if the channel is full, send will not block but rather overwrite the oldest - // messages. Order is preserved. - let (tx, _) = broadcast::channel(FLASHBLOCK_SEQUENCE_CHANNEL_SIZE); - Self { - inner: BTreeMap::new(), - block_broadcaster: tx, - execution_outcome: None, - cached_reads: None, - } - } - - /// Returns the sender half of the [`FlashBlockCompleteSequence`] channel. - pub const fn block_sequence_broadcaster( - &self, - ) -> &broadcast::Sender { - &self.block_broadcaster - } - - /// Gets a subscriber to the flashblock sequences produced. - pub fn subscribe_block_sequence(&self) -> FlashBlockCompleteSequenceRx { - self.block_broadcaster.subscribe() - } - /// Returns whether this flashblock would be accepted into the current sequence. pub fn can_accept(&self, flashblock: &FlashBlock) -> bool { if flashblock.index == 0 { @@ -207,12 +177,6 @@ impl FlashBlockPendingSequence { } } -impl Default for FlashBlockPendingSequence { - fn default() -> Self { - Self::new() - } -} - /// A complete sequence of flashblocks, often corresponding to a full block. /// /// Ensures invariants of a complete flashblocks sequence. From 8bb7fc0b705913e99bce3a92322cc624de1daaa3 Mon Sep 17 00:00:00 2001 From: Niven Date: Mon, 9 Mar 2026 18:14:03 +0800 Subject: [PATCH 04/76] chore(flashblocks-rpc): remove types module and clean up re-exports MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude Opus 4.6 --- crates/flashblocks/src/lib.rs | 5 - crates/flashblocks/src/types/mod.rs | 5 - crates/flashblocks/src/types/sequence.rs | 704 ----------------------- 3 files changed, 714 deletions(-) delete mode 100644 crates/flashblocks/src/types/mod.rs delete mode 100644 crates/flashblocks/src/types/sequence.rs diff --git a/crates/flashblocks/src/lib.rs b/crates/flashblocks/src/lib.rs index f08bf0e0..2f128506 100644 --- a/crates/flashblocks/src/lib.rs +++ b/crates/flashblocks/src/lib.rs @@ -4,15 +4,10 @@ pub mod cache; mod execution; pub mod handle; pub mod subscription; -pub mod types; mod ws; #[cfg(test)] mod test_utils; pub use execution::FlashblockCachedReceipt; -pub use types::{ - FlashBlock, FlashBlockCompleteSequence, FlashBlockPendingSequence, PendingBlockState, - PendingFlashBlock, PendingStateRegistry, SequenceExecutionOutcome, -}; pub use ws::{FlashBlockDecoder, WsConnect, WsFlashBlockStream}; diff --git a/crates/flashblocks/src/types/mod.rs b/crates/flashblocks/src/types/mod.rs deleted file mode 100644 index 3f6c63cc..00000000 --- a/crates/flashblocks/src/types/mod.rs +++ /dev/null @@ -1,5 +0,0 @@ -pub(crate) mod sequence; - -pub use sequence::{ - FlashBlockCompleteSequence, FlashBlockPendingSequence, SequenceExecutionOutcome, -}; diff --git a/crates/flashblocks/src/types/sequence.rs b/crates/flashblocks/src/types/sequence.rs deleted file mode 100644 index 35f54cc4..00000000 --- a/crates/flashblocks/src/types/sequence.rs +++ /dev/null @@ -1,704 +0,0 @@ -use crate::FlashBlock; -use core::mem; -use eyre::{bail, OptionExt}; -use std::{collections::BTreeMap, ops::Deref}; -use tracing::*; - -use alloy_primitives::{Bytes, B256}; -use alloy_rpc_types_engine::PayloadId; -use op_alloy_rpc_types_engine::OpFlashblockPayloadBase; -use reth_revm::cached::CachedReads; - -#[derive(Debug, Clone, Copy, PartialEq, Eq)] -enum FollowupRejectionReason { - BlockNumber, - PayloadId, - BlockAndPayload, -} - -impl FollowupRejectionReason { - const fn as_str(self) -> &'static str { - match self { - Self::BlockNumber => "block_number_mismatch", - Self::PayloadId => "payload_id_mismatch", - Self::BlockAndPayload => "block_and_payload_mismatch", - } - } -} - -/// Outcome from executing a flashblock sequence. -#[derive(Debug, Clone, Copy, PartialEq, Eq)] -#[allow(unnameable_types)] -pub struct SequenceExecutionOutcome { - /// The block hash of the executed pending block - pub block_hash: B256, - /// Properly computed state root - pub state_root: B256, -} - -/// An ordered B-tree keeping the track of a sequence of [`FlashBlock`]s by their indices. -#[derive(Debug, Default)] -pub struct FlashBlockPendingSequence { - /// tracks the individual flashblocks in order - inner: BTreeMap, - /// Optional execution outcome from building the current sequence. - execution_outcome: Option, - /// Cached state reads for the current block. - /// Current `PendingFlashBlock` is built out of a sequence of `FlashBlocks`, and executed again - /// when fb received on top of the same block. Avoid redundant I/O across multiple - /// executions within the same block. - cached_reads: Option, -} - -impl FlashBlockPendingSequence { - /// Returns whether this flashblock would be accepted into the current sequence. - pub fn can_accept(&self, flashblock: &FlashBlock) -> bool { - if flashblock.index == 0 { - return true; - } - - self.followup_rejection_reason(flashblock).is_none() - } - - fn followup_rejection_reason( - &self, - flashblock: &FlashBlock, - ) -> Option { - // only insert if we previously received the same block and payload, assume we received - // index 0 - let same_block = self.block_number() == Some(flashblock.block_number()); - let same_payload = self.payload_id() == Some(flashblock.payload_id); - if same_block && same_payload { - None - } else if !same_block && !same_payload { - Some(FollowupRejectionReason::BlockAndPayload) - } else if !same_block { - Some(FollowupRejectionReason::BlockNumber) - } else { - Some(FollowupRejectionReason::PayloadId) - } - } - - /// Inserts a new block into the sequence. - /// - /// A [`FlashBlock`] with index 0 resets the set. - pub fn insert(&mut self, flashblock: FlashBlock) { - if flashblock.index == 0 { - trace!(target: "flashblocks", number=%flashblock.block_number(), "Tracking new flashblock sequence"); - self.inner.insert(flashblock.index, flashblock); - return; - } - - if self.can_accept(&flashblock) { - trace!(target: "flashblocks", number=%flashblock.block_number(), index = %flashblock.index, block_count = self.inner.len() ,"Received followup flashblock"); - self.inner.insert(flashblock.index, flashblock); - } else { - let rejection_reason = self - .followup_rejection_reason(&flashblock) - .expect("non-accepted followup must have rejection reason"); - trace!( - target: "flashblocks", - number = %flashblock.block_number(), - index = %flashblock.index, - current_block_number = ?self.block_number(), - expected_payload_id = ?self.payload_id(), - incoming_payload_id = ?flashblock.payload_id, - rejection_reason = rejection_reason.as_str(), - "Ignoring untracked flashblock following" - ); - } - } - - /// Set execution outcome from building the flashblock sequence - pub const fn set_execution_outcome( - &mut self, - execution_outcome: Option, - ) { - self.execution_outcome = execution_outcome; - } - - /// Set cached reads for this sequence - pub fn set_cached_reads(&mut self, cached_reads: CachedReads) { - self.cached_reads = Some(cached_reads); - } - - /// Removes the cached reads for this sequence - pub const fn take_cached_reads(&mut self) -> Option { - self.cached_reads.take() - } - - /// Returns the first block number - pub fn block_number(&self) -> Option { - Some(self.inner.values().next()?.block_number()) - } - - /// Returns the payload base of the first tracked flashblock. - pub fn payload_base(&self) -> Option { - self.inner.values().next()?.base.clone() - } - - /// Returns the number of tracked flashblocks. - pub fn count(&self) -> usize { - self.inner.len() - } - - /// Returns the reference to the last flashblock. - pub fn last_flashblock(&self) -> Option<&FlashBlock> { - self.inner.last_key_value().map(|(_, b)| b) - } - - /// Returns the current/latest flashblock index in the sequence - pub fn index(&self) -> Option { - Some(self.inner.values().last()?.index) - } - /// Returns the payload id of the first tracked flashblock in the current sequence. - pub fn payload_id(&self) -> Option { - Some(self.inner.values().next()?.payload_id) - } - - /// Finalizes the current pending sequence and returns it as a complete sequence. - /// - /// Clears the internal state and returns an error if the sequence is empty or validation fails. - pub fn finalize(&mut self) -> eyre::Result { - if self.inner.is_empty() { - bail!("Cannot finalize empty flashblock sequence"); - } - - let flashblocks = mem::take(&mut self.inner); - let execution_outcome = mem::take(&mut self.execution_outcome); - self.cached_reads = None; - - FlashBlockCompleteSequence::new(flashblocks.into_values().collect(), execution_outcome) - } - - /// Returns an iterator over all flashblocks in the sequence. - pub fn flashblocks(&self) -> impl Iterator { - self.inner.values() - } -} - -/// A complete sequence of flashblocks, often corresponding to a full block. -/// -/// Ensures invariants of a complete flashblocks sequence. -/// If this entire sequence of flashblocks was executed on top of latest block, this also includes -/// the execution outcome with block hash and state root. -#[derive(Debug, Clone)] -pub struct FlashBlockCompleteSequence { - inner: Vec, - /// Optional execution outcome from building the flashblock sequence - execution_outcome: Option, -} - -impl FlashBlockCompleteSequence { - /// Create a complete sequence from a vector of flashblocks. - /// Ensure that: - /// * vector is not empty - /// * first flashblock have the base payload - /// * sequence of flashblocks is sound (successive index from 0, same payload id, ...) - pub fn new( - blocks: Vec, - execution_outcome: Option, - ) -> eyre::Result { - let first_block = blocks.first().ok_or_eyre("No flashblocks in sequence")?; - - // Ensure that first flashblock have base - first_block.base.as_ref().ok_or_eyre("Flashblock at index 0 has no base")?; - - // Ensure that index are successive from 0, have same block number and payload id - if !blocks.iter().enumerate().all(|(idx, block)| { - idx == block.index as usize - && block.payload_id == first_block.payload_id - && block.block_number() == first_block.block_number() - }) { - bail!("Flashblock inconsistencies detected in sequence"); - } - - Ok(Self { inner: blocks, execution_outcome }) - } - - /// Returns the block number - pub fn block_number(&self) -> u64 { - self.inner.first().unwrap().block_number() - } - - /// Returns the payload base of the first flashblock. - pub fn payload_base(&self) -> &OpFlashblockPayloadBase { - self.inner.first().unwrap().base.as_ref().unwrap() - } - - /// Returns the payload id shared by all flashblocks in the sequence. - pub fn payload_id(&self) -> PayloadId { - self.inner.first().unwrap().payload_id - } - - /// Returns the number of flashblocks in the sequence. - pub const fn count(&self) -> usize { - self.inner.len() - } - - /// Returns the last flashblock in the sequence. - pub fn last(&self) -> &FlashBlock { - self.inner.last().unwrap() - } - - /// Returns the execution outcome of the sequence. - pub const fn execution_outcome(&self) -> Option { - self.execution_outcome - } - - /// Updates execution outcome of the sequence. - pub const fn set_execution_outcome( - &mut self, - execution_outcome: Option, - ) { - self.execution_outcome = execution_outcome; - } - - /// Returns all transactions from all flashblocks in the sequence - pub fn all_transactions(&self) -> Vec { - self.inner.iter().flat_map(|fb| fb.diff.transactions.iter().cloned()).collect() - } -} - -impl Deref for FlashBlockCompleteSequence { - type Target = Vec; - - fn deref(&self) -> &Self::Target { - &self.inner - } -} - -impl TryFrom for FlashBlockCompleteSequence { - type Error = eyre::Error; - fn try_from(sequence: FlashBlockPendingSequence) -> Result { - Self::new(sequence.inner.into_values().collect(), sequence.execution_outcome) - } -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::test_utils::TestFlashBlockFactory; - - mod pending_sequence_insert { - use super::*; - - #[test] - fn test_insert_index_zero_creates_new_sequence() { - let mut sequence = FlashBlockPendingSequence::new(); - let factory = TestFlashBlockFactory::new(); - let fb0 = factory.flashblock_at(0).build(); - let payload_id = fb0.payload_id; - - sequence.insert(fb0); - - assert_eq!(sequence.count(), 1); - assert_eq!(sequence.block_number(), Some(100)); - assert_eq!(sequence.payload_id(), Some(payload_id)); - } - - #[test] - fn test_insert_followup_same_block_and_payload() { - let mut sequence = FlashBlockPendingSequence::new(); - let factory = TestFlashBlockFactory::new(); - - let fb0 = factory.flashblock_at(0).build(); - sequence.insert(fb0.clone()); - - let fb1 = factory.flashblock_after(&fb0).build(); - sequence.insert(fb1.clone()); - - let fb2 = factory.flashblock_after(&fb1).build(); - sequence.insert(fb2); - - assert_eq!(sequence.count(), 3); - assert_eq!(sequence.index(), Some(2)); - } - - #[test] - fn test_insert_ignores_different_block_number() { - let mut sequence = FlashBlockPendingSequence::new(); - let factory = TestFlashBlockFactory::new(); - - let fb0 = factory.flashblock_at(0).build(); - sequence.insert(fb0.clone()); - - // Try to insert followup with different block number - let fb1 = factory.flashblock_after(&fb0).block_number(101).build(); - sequence.insert(fb1); - - assert_eq!(sequence.count(), 1); - assert_eq!(sequence.block_number(), Some(100)); - } - - #[test] - fn test_insert_ignores_different_payload_id() { - let mut sequence = FlashBlockPendingSequence::new(); - let factory = TestFlashBlockFactory::new(); - - let fb0 = factory.flashblock_at(0).build(); - let payload_id1 = fb0.payload_id; - sequence.insert(fb0.clone()); - - // Try to insert followup with different payload_id - let payload_id2 = alloy_rpc_types_engine::PayloadId::new([2u8; 8]); - let fb1 = factory.flashblock_after(&fb0).payload_id(payload_id2).build(); - sequence.insert(fb1); - - assert_eq!(sequence.count(), 1); - assert_eq!(sequence.payload_id(), Some(payload_id1)); - } - - #[test] - fn test_insert_maintains_btree_order() { - let mut sequence = FlashBlockPendingSequence::new(); - let factory = TestFlashBlockFactory::new(); - - let fb0 = factory.flashblock_at(0).build(); - sequence.insert(fb0.clone()); - - let fb2 = factory.flashblock_after(&fb0).index(2).build(); - sequence.insert(fb2); - - let fb1 = factory.flashblock_after(&fb0).build(); - sequence.insert(fb1); - - let indices: Vec = sequence.flashblocks().map(|fb| fb.index).collect(); - assert_eq!(indices, vec![0, 1, 2]); - } - } - - mod pending_sequence_finalize { - use super::*; - - #[test] - fn test_finalize_empty_sequence_fails() { - let mut sequence = FlashBlockPendingSequence::new(); - let result = sequence.finalize(); - - assert!(result.is_err()); - assert_eq!( - result.unwrap_err().to_string(), - "Cannot finalize empty flashblock sequence" - ); - } - - #[test] - fn test_finalize_clears_pending_state() { - let mut sequence = FlashBlockPendingSequence::new(); - let factory = TestFlashBlockFactory::new(); - - let fb0 = factory.flashblock_at(0).build(); - sequence.insert(fb0); - - assert_eq!(sequence.count(), 1); - - let _complete = sequence.finalize().unwrap(); - - // After finalize, sequence should be empty - assert_eq!(sequence.count(), 0); - assert_eq!(sequence.block_number(), None); - } - - #[test] - fn test_finalize_preserves_execution_outcome() { - let mut sequence = FlashBlockPendingSequence::new(); - let factory = TestFlashBlockFactory::new(); - - let fb0 = factory.flashblock_at(0).build(); - sequence.insert(fb0); - - let outcome = - SequenceExecutionOutcome { block_hash: B256::random(), state_root: B256::random() }; - sequence.set_execution_outcome(Some(outcome)); - - let complete = sequence.finalize().unwrap(); - - assert_eq!(complete.execution_outcome(), Some(outcome)); - } - - #[test] - fn test_finalize_clears_cached_reads() { - let mut sequence = FlashBlockPendingSequence::new(); - let factory = TestFlashBlockFactory::new(); - - let fb0 = factory.flashblock_at(0).build(); - sequence.insert(fb0); - - let cached_reads = CachedReads::default(); - sequence.set_cached_reads(cached_reads); - assert!(sequence.take_cached_reads().is_some()); - - let _complete = sequence.finalize().unwrap(); - - // Cached reads should be cleared - assert!(sequence.take_cached_reads().is_none()); - } - - #[test] - fn test_finalize_multiple_times_after_refill() { - let mut sequence = FlashBlockPendingSequence::new(); - let factory = TestFlashBlockFactory::new(); - - // First sequence - let fb0 = factory.flashblock_at(0).build(); - sequence.insert(fb0); - - let complete1 = sequence.finalize().unwrap(); - assert_eq!(complete1.count(), 1); - - // Add new sequence for next block - let fb1 = factory.flashblock_for_next_block(&complete1.last().clone()).build(); - sequence.insert(fb1); - - let complete2 = sequence.finalize().unwrap(); - assert_eq!(complete2.count(), 1); - assert_eq!(complete2.block_number(), 101); - } - } - - mod complete_sequence_invariants { - use super::*; - - #[test] - fn test_new_empty_sequence_fails() { - let result = FlashBlockCompleteSequence::new(vec![], None); - assert!(result.is_err()); - assert_eq!(result.unwrap_err().to_string(), "No flashblocks in sequence"); - } - - #[test] - fn test_new_requires_base_at_index_zero() { - let factory = TestFlashBlockFactory::new(); - // Use builder() with index 1 first to create a flashblock, then change its index to 0 - // to bypass the auto-base creation logic - let mut fb0_no_base = factory.flashblock_at(1).build(); - fb0_no_base.index = 0; - fb0_no_base.base = None; - - let result = FlashBlockCompleteSequence::new(vec![fb0_no_base], None); - assert!(result.is_err()); - assert_eq!(result.unwrap_err().to_string(), "Flashblock at index 0 has no base"); - } - - #[test] - fn test_new_validates_successive_indices() { - let factory = TestFlashBlockFactory::new(); - - let fb0 = factory.flashblock_at(0).build(); - // Skip index 1, go straight to 2 - let fb2 = factory.flashblock_after(&fb0).index(2).build(); - - let result = FlashBlockCompleteSequence::new(vec![fb0, fb2], None); - assert!(result.is_err()); - assert_eq!( - result.unwrap_err().to_string(), - "Flashblock inconsistencies detected in sequence" - ); - } - - #[test] - fn test_new_validates_same_block_number() { - let factory = TestFlashBlockFactory::new(); - - let fb0 = factory.flashblock_at(0).build(); - let fb1 = factory.flashblock_after(&fb0).block_number(101).build(); - - let result = FlashBlockCompleteSequence::new(vec![fb0, fb1], None); - assert!(result.is_err()); - assert_eq!( - result.unwrap_err().to_string(), - "Flashblock inconsistencies detected in sequence" - ); - } - - #[test] - fn test_new_validates_same_payload_id() { - let factory = TestFlashBlockFactory::new(); - - let fb0 = factory.flashblock_at(0).build(); - let payload_id2 = alloy_rpc_types_engine::PayloadId::new([2u8; 8]); - let fb1 = factory.flashblock_after(&fb0).payload_id(payload_id2).build(); - - let result = FlashBlockCompleteSequence::new(vec![fb0, fb1], None); - assert!(result.is_err()); - assert_eq!( - result.unwrap_err().to_string(), - "Flashblock inconsistencies detected in sequence" - ); - } - - #[test] - fn test_new_valid_single_flashblock() { - let factory = TestFlashBlockFactory::new(); - let fb0 = factory.flashblock_at(0).build(); - - let result = FlashBlockCompleteSequence::new(vec![fb0], None); - assert!(result.is_ok()); - - let complete = result.unwrap(); - assert_eq!(complete.count(), 1); - assert_eq!(complete.block_number(), 100); - } - - #[test] - fn test_new_valid_multiple_flashblocks() { - let factory = TestFlashBlockFactory::new(); - - let fb0 = factory.flashblock_at(0).build(); - let fb1 = factory.flashblock_after(&fb0).build(); - let fb2 = factory.flashblock_after(&fb1).build(); - - let result = FlashBlockCompleteSequence::new(vec![fb0, fb1, fb2], None); - assert!(result.is_ok()); - - let complete = result.unwrap(); - assert_eq!(complete.count(), 3); - assert_eq!(complete.last().index, 2); - } - - #[test] - fn test_all_transactions_aggregates_correctly() { - let factory = TestFlashBlockFactory::new(); - - let fb0 = factory - .flashblock_at(0) - .transactions(vec![Bytes::from_static(&[1, 2, 3]), Bytes::from_static(&[4, 5, 6])]) - .build(); - - let fb1 = factory - .flashblock_after(&fb0) - .transactions(vec![Bytes::from_static(&[7, 8, 9])]) - .build(); - - let complete = FlashBlockCompleteSequence::new(vec![fb0, fb1], None).unwrap(); - let all_txs = complete.all_transactions(); - - assert_eq!(all_txs.len(), 3); - assert_eq!(all_txs[0], Bytes::from_static(&[1, 2, 3])); - assert_eq!(all_txs[1], Bytes::from_static(&[4, 5, 6])); - assert_eq!(all_txs[2], Bytes::from_static(&[7, 8, 9])); - } - - #[test] - fn test_payload_base_returns_first_block_base() { - let factory = TestFlashBlockFactory::new(); - - let fb0 = factory.flashblock_at(0).build(); - let fb1 = factory.flashblock_after(&fb0).build(); - - let complete = FlashBlockCompleteSequence::new(vec![fb0.clone(), fb1], None).unwrap(); - - assert_eq!(complete.payload_base().block_number, fb0.base.unwrap().block_number); - } - - #[test] - fn test_execution_outcome_mutation() { - let factory = TestFlashBlockFactory::new(); - let fb0 = factory.flashblock_at(0).build(); - - let mut complete = FlashBlockCompleteSequence::new(vec![fb0], None).unwrap(); - assert!(complete.execution_outcome().is_none()); - - let outcome = - SequenceExecutionOutcome { block_hash: B256::random(), state_root: B256::random() }; - complete.set_execution_outcome(Some(outcome)); - - assert_eq!(complete.execution_outcome(), Some(outcome)); - } - - #[test] - fn test_deref_provides_vec_access() { - let factory = TestFlashBlockFactory::new(); - - let fb0 = factory.flashblock_at(0).build(); - let fb1 = factory.flashblock_after(&fb0).build(); - - let complete = FlashBlockCompleteSequence::new(vec![fb0, fb1], None).unwrap(); - - // Use deref to access Vec methods - assert_eq!(complete.len(), 2); - assert!(!complete.is_empty()); - } - } - - mod sequence_conversion { - use super::*; - - #[test] - fn test_try_from_pending_to_complete_valid() { - let mut pending = FlashBlockPendingSequence::new(); - let factory = TestFlashBlockFactory::new(); - - let fb0 = factory.flashblock_at(0).build(); - pending.insert(fb0); - - let complete: Result = pending.try_into(); - assert!(complete.is_ok()); - assert_eq!(complete.unwrap().count(), 1); - } - - #[test] - fn test_try_from_pending_to_complete_empty_fails() { - let pending = FlashBlockPendingSequence::new(); - - let complete: Result = pending.try_into(); - assert!(complete.is_err()); - } - - #[test] - fn test_try_from_preserves_execution_outcome() { - let mut pending = FlashBlockPendingSequence::new(); - let factory = TestFlashBlockFactory::new(); - - let fb0 = factory.flashblock_at(0).build(); - pending.insert(fb0); - - let outcome = - SequenceExecutionOutcome { block_hash: B256::random(), state_root: B256::random() }; - pending.set_execution_outcome(Some(outcome)); - - let complete: FlashBlockCompleteSequence = pending.try_into().unwrap(); - assert_eq!(complete.execution_outcome(), Some(outcome)); - } - } - - mod pending_sequence_helpers { - use super::*; - - #[test] - fn test_last_flashblock_returns_highest_index() { - let mut sequence = FlashBlockPendingSequence::new(); - let factory = TestFlashBlockFactory::new(); - - let fb0 = factory.flashblock_at(0).build(); - sequence.insert(fb0.clone()); - - let fb1 = factory.flashblock_after(&fb0).build(); - sequence.insert(fb1); - - let last = sequence.last_flashblock().unwrap(); - assert_eq!(last.index, 1); - } - - #[test] - fn test_subscribe_block_sequence_channel() { - let sequence = FlashBlockPendingSequence::new(); - let mut rx = sequence.subscribe_block_sequence(); - - // Spawn a task that sends a complete sequence - let tx = sequence.block_sequence_broadcaster().clone(); - std::thread::spawn(move || { - let factory = TestFlashBlockFactory::new(); - let fb0 = factory.flashblock_at(0).build(); - let complete = FlashBlockCompleteSequence::new(vec![fb0], None).unwrap(); - let _ = tx.send(complete); - }); - - // Should receive the broadcast - let received = rx.blocking_recv(); - assert!(received.is_ok()); - assert_eq!(received.unwrap().count(), 1); - } - } -} From 9be3a7402b8868d6d13918c0a8f6404656183e8b Mon Sep 17 00:00:00 2001 From: Niven Date: Mon, 9 Mar 2026 19:16:45 +0800 Subject: [PATCH 05/76] feat(flashblocks): add pending sequence and canonical block handlers to state cache MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude Sonnet 4.6 --- crates/flashblocks/src/cache/state.rs | 33 ++++++++++++++++++++++++--- 1 file changed, 30 insertions(+), 3 deletions(-) diff --git a/crates/flashblocks/src/cache/state.rs b/crates/flashblocks/src/cache/state.rs index 5868bc4d..7d4634d5 100644 --- a/crates/flashblocks/src/cache/state.rs +++ b/crates/flashblocks/src/cache/state.rs @@ -1,12 +1,12 @@ +use crate::cache::{confirm::ConfirmCache, pending::PendingSequence}; +use parking_lot::RwLock; use std::sync::Arc; +use alloy_consensus::BlockHeader; use alloy_primitives::B256; -use parking_lot::RwLock; use reth_primitives_traits::NodePrimitives; use reth_rpc_eth_types::block::BlockAndReceipts; -use super::{confirm::ConfirmCache, pending::PendingSequence}; - /// Top-level controller state cache for the flashblocks RPC layer. /// /// Composed of: @@ -40,10 +40,25 @@ impl StateCache { self.inner.write().handle_confirmed_block(block_number, block_hash, block) } + /// Handles updating the pending state with a newly executed pending flashblocks + /// sequence. Note that it will replace any existing pending sequence. + pub fn handle_pending_sequence(&self, pending_sequence: PendingSequence) { + self.inner.write().handle_pending_sequence(pending_sequence) + } + + pub fn handle_canonical_block(&self, block_number: u64, block_hash: B256) { + self.inner.write().handle_canonical_block(block_number, block_hash) + } + /// Returns the current confirmed cache height, if any blocks have been confirmed. pub fn get_confirm_height(&self) -> Option { self.inner.read().confirm_height } + + /// Returns the current pending height, if any flashblocks have been executed. + pub fn get_pending_height(&self) -> Option { + self.inner.read().pending.as_ref().map(|p| p.pending.block().number()) + } } /// Inner state of the flashblocks state cache. @@ -94,4 +109,16 @@ impl StateCacheInner { } Ok(()) } + + fn handle_pending_sequence(&mut self, pending_sequence: PendingSequence) { + self.pending = Some(pending_sequence); + } + + fn handle_canonical_block(&mut self, block_number: u64, block_hash: B256) { + self.canon_height = block_number; + self.confirm_cache.flush_up_to(block_number); + if self.pending.as_ref().map(|p| p.block_hash) == Some(block_hash) { + self.pending = None; + } + } } From 9a25e600233e2b7ffbe45853d83ec6cfdcaa4c4d Mon Sep 17 00:00:00 2001 From: Niven Date: Tue, 10 Mar 2026 17:50:39 +0800 Subject: [PATCH 06/76] feat(builder): revamp flashblocks caching layer MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude Sonnet 4.6 --- Cargo.lock | 3 + Cargo.toml | 1 + crates/flashblocks/Cargo.toml | 3 + crates/flashblocks/src/cache/confirm.rs | 33 +- crates/flashblocks/src/cache/mod.rs | 5 + crates/flashblocks/src/cache/pending.rs | 26 +- crates/flashblocks/src/cache/raw.rs | 422 ++++-------------- crates/flashblocks/src/cache/state/block.rs | 160 +++++++ crates/flashblocks/src/cache/state/factory.rs | 77 ++++ crates/flashblocks/src/cache/state/header.rs | 89 ++++ crates/flashblocks/src/cache/state/id.rs | 119 +++++ .../src/cache/{state.rs => state/mod.rs} | 88 +++- crates/flashblocks/src/cache/state/receipt.rs | 58 +++ .../src/cache/state/transaction.rs | 82 ++++ crates/flashblocks/src/cache/state/utils.rs | 34 ++ crates/flashblocks/src/execution/cache.rs | 21 +- crates/flashblocks/src/execution/mod.rs | 5 +- crates/flashblocks/src/execution/worker.rs | 96 ++-- crates/flashblocks/src/test_utils.rs | 15 +- crates/flashblocks/src/ws/decoding.rs | 15 +- crates/flashblocks/src/ws/stream.rs | 22 +- 21 files changed, 894 insertions(+), 480 deletions(-) create mode 100644 crates/flashblocks/src/cache/state/block.rs create mode 100644 crates/flashblocks/src/cache/state/factory.rs create mode 100644 crates/flashblocks/src/cache/state/header.rs create mode 100644 crates/flashblocks/src/cache/state/id.rs rename crates/flashblocks/src/cache/{state.rs => state/mod.rs} (55%) create mode 100644 crates/flashblocks/src/cache/state/receipt.rs create mode 100644 crates/flashblocks/src/cache/state/transaction.rs create mode 100644 crates/flashblocks/src/cache/state/utils.rs diff --git a/Cargo.lock b/Cargo.lock index 9b26481f..2dd0d503 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -14264,6 +14264,8 @@ dependencies = [ "op-revm", "parking_lot", "reth-chain-state", + "reth-chainspec", + "reth-db-models", "reth-errors", "reth-evm", "reth-execution-types", @@ -14293,6 +14295,7 @@ dependencies = [ "tokio", "tokio-stream", "tokio-tungstenite", + "tokio-util", "tracing", "url", "xlayer-builder", diff --git a/Cargo.toml b/Cargo.toml index f33a762d..2f866a99 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -85,6 +85,7 @@ reth-cli-commands = { git = "https://github.com/okx/reth", rev = "b6a31f31af91ab reth-cli-util = { git = "https://github.com/okx/reth", rev = "b6a31f31af91abdecb475f2a991906bff9bbef7f" } reth-db = { git = "https://github.com/okx/reth", rev = "b6a31f31af91abdecb475f2a991906bff9bbef7f" } reth-db-api = { git = "https://github.com/okx/reth", rev = "b6a31f31af91abdecb475f2a991906bff9bbef7f" } +reth-db-models = { git = "https://github.com/okx/reth", rev = "b6a31f31af91abdecb475f2a991906bff9bbef7f" } reth-engine-primitives = { git = "https://github.com/okx/reth", rev = "b6a31f31af91abdecb475f2a991906bff9bbef7f" } reth-ethereum-forks = { git = "https://github.com/okx/reth", rev = "b6a31f31af91abdecb475f2a991906bff9bbef7f" } reth-evm = { git = "https://github.com/okx/reth", rev = "b6a31f31af91abdecb475f2a991906bff9bbef7f" } diff --git a/crates/flashblocks/Cargo.toml b/crates/flashblocks/Cargo.toml index 30cf9e2a..3c47a756 100644 --- a/crates/flashblocks/Cargo.toml +++ b/crates/flashblocks/Cargo.toml @@ -15,6 +15,8 @@ xlayer-builder.workspace = true # reth reth-chain-state = { workspace = true, features = ["serde"] } +reth-chainspec.workspace = true +reth-db-models.workspace = true reth-errors.workspace = true reth-evm.workspace = true reth-execution-types = { workspace = true, features = ["serde"] } @@ -53,6 +55,7 @@ futures-util.workspace = true tokio.workspace = true tokio-stream.workspace = true tokio-tungstenite.workspace = true +tokio-util.workspace = true # rpc jsonrpsee.workspace = true diff --git a/crates/flashblocks/src/cache/confirm.rs b/crates/flashblocks/src/cache/confirm.rs index 30b0f599..293aaff3 100644 --- a/crates/flashblocks/src/cache/confirm.rs +++ b/crates/flashblocks/src/cache/confirm.rs @@ -7,9 +7,9 @@ use reth_rpc_eth_types::block::BlockAndReceipts; const DEFAULT_CONFIRM_CACHE_SIZE: usize = 5_000; -/// Confirmed flashblocks sequence cache that is ahead of the current -/// canonical chain. We optimistically commit confirmed flashblocks sequences to -/// the cache and flush them when the canonical chain catches up. +/// Confirmed flashblocks sequence cache that is ahead of the current node's canonical +/// chainstate. We optimistically commit confirmed flashblocks sequences to the cache +/// and flush them when the canonical chainstate catches up. /// /// Block data is stored in a `BTreeMap` keyed by block number, enabling O(log n) /// range splits in [`flush_up_to`](Self::flush_up_to). A secondary `HashMap` @@ -69,15 +69,9 @@ impl ConfirmCache { Ok(()) } - /// Returns the confirmed block for the given block hash, if present. - pub fn get_by_hash(&self, block_hash: &B256) -> Option> { - let number = self.hash_to_number.get(block_hash)?; - self.blocks.get(number).map(|(_, block)| block.clone()) - } - - /// Returns the confirmed block for the given block number, if present. - pub fn get_by_number(&self, block_number: u64) -> Option> { - self.blocks.get(&block_number).map(|(_, block)| block.clone()) + /// Returns the block number for the given block hash, if cached. + pub fn number_for_hash(&self, block_hash: &B256) -> Option { + self.hash_to_number.get(block_hash).copied() } /// Returns the block hash for the given block number, if cached. @@ -85,6 +79,16 @@ impl ConfirmCache { self.blocks.get(&block_number).map(|(hash, _)| *hash) } + /// Returns the confirmed block for the given block hash, if present. + pub fn get_block_by_hash(&self, block_hash: &B256) -> Option> { + self.get_block_by_number(self.number_for_hash(block_hash)?) + } + + /// Returns the confirmed block for the given block number, if present. + pub fn get_block_by_number(&self, block_number: u64) -> Option> { + self.blocks.get(&block_number).map(|(_, block)| block.clone()) + } + /// Returns `true` if the cache contains a block with the given hash. pub fn contains_hash(&self, block_hash: &B256) -> bool { self.hash_to_number.contains_key(block_hash) @@ -134,11 +138,6 @@ impl ConfirmCache { count } - /// Returns the highest cached block number, or `None` if empty. - pub fn latest_block_number(&self) -> Option { - self.blocks.keys().next_back().copied() - } - /// Clears all entries. pub fn clear(&mut self) { self.blocks.clear(); diff --git a/crates/flashblocks/src/cache/mod.rs b/crates/flashblocks/src/cache/mod.rs index 5b8e5a1b..8c68853e 100644 --- a/crates/flashblocks/src/cache/mod.rs +++ b/crates/flashblocks/src/cache/mod.rs @@ -2,3 +2,8 @@ pub(crate) mod confirm; pub(crate) mod pending; pub(crate) mod raw; pub(crate) mod state; + +pub(crate) use confirm::ConfirmCache; +pub(crate) use pending::PendingSequence; +pub(crate) use raw::RawFlashblocksCache; +pub use state::StateCache; diff --git a/crates/flashblocks/src/cache/pending.rs b/crates/flashblocks/src/cache/pending.rs index a1d92546..d940416a 100644 --- a/crates/flashblocks/src/cache/pending.rs +++ b/crates/flashblocks/src/cache/pending.rs @@ -13,35 +13,34 @@ pub struct PendingSequence { /// Locally built full pending block of the latest flashblocks sequence. #[deref] pub pending: PendingBlock, - /// The current block hash of the latest flashblocks sequence. - pub block_hash: B256, + /// Cached reads from execution for reuse. + pub cached_reads: CachedReads, /// Parent hash of the built block (may be non-canonical or canonical). pub parent_hash: B256, /// The last flashblock index of the latest flashblocks sequence. pub last_flashblock_index: u64, - /// Cached reads from execution for reuse. - pub cached_reads: CachedReads, /// Whether the [`PendingFlashblockSequence`] has a properly computed stateroot. pub has_computed_state_root: bool, + /// The current block hash of the latest flashblocks sequence. `None` if state + /// root is not computed yet. + pub block_hash: Option, } impl PendingSequence { /// Create new pending flashblock. pub const fn new( pending: PendingBlock, - block_hash: B256, + cached_reads: CachedReads, parent_hash: B256, last_flashblock_index: u64, - cached_reads: CachedReads, - has_computed_state_root: bool, ) -> Self { Self { pending, - block_hash, + cached_reads, parent_hash, last_flashblock_index, - cached_reads, - has_computed_state_root, + has_computed_state_root: false, + block_hash: None, } } @@ -49,6 +48,13 @@ impl PendingSequence { pub fn computed_state_root(&self) -> Option { self.has_computed_state_root.then_some(self.pending.block().state_root()) } + + /// Sets the computed state root and block hash for the pending block. + pub fn set_state_root_and_block_hash(&mut self, pending: PendingBlock) { + self.pending = pending; + self.block_hash = Some(self.pending.block().hash()); + self.has_computed_state_root = true; + } } #[cfg(test)] diff --git a/crates/flashblocks/src/cache/raw.rs b/crates/flashblocks/src/cache/raw.rs index 50956021..d38d0f75 100644 --- a/crates/flashblocks/src/cache/raw.rs +++ b/crates/flashblocks/src/cache/raw.rs @@ -1,207 +1,110 @@ -//! Sequence cache management for flashblocks. -//! -//! The `SequenceManager` maintains a ring buffer of recently completed flashblock sequences -//! and intelligently selects which sequence to build based on the local chain tip. - -use crate::{ - execution::worker::BuildArgs, - types::pending_state::PendingBlockState, - types::sequence::{FlashBlockPendingSequence, SequenceExecutionOutcome}, - validation::{ - CanonicalBlockFingerprint, CanonicalBlockReconciler, ReconciliationStrategy, ReorgDetector, - TrackedBlockFingerprint, - }, - FlashBlock, FlashBlockCompleteSequence, PendingFlashBlock, -}; +use crate::execution::BuildArgs; +use parking_lot::RwLock; +use std::{collections::BTreeMap, sync::Arc}; +use tracing::*; + use alloy_eips::eip2718::WithEncoded; use alloy_primitives::B256; use alloy_rpc_types_engine::PayloadId; +use op_alloy_rpc_types_engine::OpFlashblockPayload; + use reth_primitives_traits::{ transaction::TxHashRef, NodePrimitives, Recovered, SignedTransaction, }; -use reth_revm::cached::CachedReads; -use ringbuffer::{AllocRingBuffer, RingBuffer}; -use std::collections::{BTreeMap, HashSet}; -use tokio::sync::broadcast; -use tracing::*; - -/// Maximum number of cached sequences in the ring buffer. -const CACHE_SIZE: usize = 3; -/// 200 ms flashblock time. -pub(crate) const FLASHBLOCK_BLOCK_TIME: u64 = 200; - -/// Stable identity for a tracked flashblock sequence. -#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] -pub(crate) struct SequenceId { - pub(crate) block_number: u64, - pub(crate) payload_id: PayloadId, - pub(crate) parent_hash: B256, -} - -impl SequenceId { - fn from_pending(sequence: &FlashBlockPendingSequence) -> Option { - let base = sequence.payload_base()?; - let payload_id = sequence.payload_id()?; - Some(Self { block_number: base.block_number, payload_id, parent_hash: base.parent_hash }) - } - - fn from_complete(sequence: &FlashBlockCompleteSequence) -> Self { - Self { - block_number: sequence.block_number(), - payload_id: sequence.payload_id(), - parent_hash: sequence.payload_base().parent_hash, - } - } -} - -/// Snapshot selector for build-completion matching. -#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] -enum SequenceSnapshot { - Pending { revision: u64 }, - Cached, -} - -/// Opaque ticket that identifies the exact sequence snapshot selected for a build. -#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] -pub(crate) struct BuildTicket { - sequence_id: SequenceId, - snapshot: SequenceSnapshot, -} - -impl BuildTicket { - const fn pending(sequence_id: SequenceId, revision: u64) -> Self { - Self { sequence_id, snapshot: SequenceSnapshot::Pending { revision } } - } - - const fn cached(sequence_id: SequenceId) -> Self { - Self { sequence_id, snapshot: SequenceSnapshot::Cached } - } -} - -/// Result of attempting to apply a build completion to tracked sequence state. -#[derive(Debug, Clone, Copy, PartialEq, Eq)] -pub(crate) enum BuildApplyOutcome { - SkippedNoBuildResult, - AppliedPending, - AppliedCached { - rebroadcasted: bool, - }, - RejectedPendingSequenceMismatch { - ticket_sequence_id: SequenceId, - current_sequence_id: Option, - }, - RejectedPendingRevisionStale { - sequence_id: SequenceId, - ticket_revision: u64, - current_revision: u64, - }, - RejectedCachedSequenceMissing { - sequence_id: SequenceId, - }, -} - -impl BuildApplyOutcome { - pub(crate) const fn is_applied(self) -> bool { - matches!(self, Self::AppliedPending | Self::AppliedCached { .. }) - } -} - -/// A buildable sequence plus the stable identity that selected it. -pub(crate) struct BuildCandidate { - pub(crate) ticket: BuildTicket, - pub(crate) args: BuildArgs, -} - -impl std::ops::Deref for BuildCandidate { - type Target = BuildArgs; - - fn deref(&self) -> &Self::Target { - &self.args - } -} -/// In-progress pending sequence state. -/// -/// Keeps accepted flashblocks and recovered transactions in lockstep by index. +/// Raw flashblocks sequence keeps track of the flashblocks sequence based on their +/// `payload_id`. #[derive(Debug)] -struct PendingSequence { - sequence: FlashBlockPendingSequence, +struct RawFlashblocksSequence { + /// Tracks the individual flashblocks in order + inner: BTreeMap, + /// Tracks the recovered transactions by index recovered_transactions_by_index: BTreeMap>>>, + /// Tracks if the accumulated sequence has received the first base flashblock + has_base: bool, + /// Tracks the revision of the sequence revision: u64, + /// Tracks the revision that has been applied to the state cache applied_revision: Option, } -impl PendingSequence { +impl RawFlashblocksSequence { fn new() -> Self { Self { - sequence: FlashBlockPendingSequence::new(), + inner: BTreeMap::new(), recovered_transactions_by_index: BTreeMap::new(), + has_base: false, revision: 0, applied_revision: None, } } - const fn sequence(&self) -> &FlashBlockPendingSequence { - &self.sequence - } + /// Inserts a flashblock into the sequence. + fn insert_flashblock(&mut self, flashblock: OpFlashblockPayload) -> eyre::Result<()> { + if !self.can_accept(&flashblock) { + return Err(eyre::eyre!("flashblock does not match current sequence id")); + } - fn count(&self) -> usize { - self.sequence.count() - } + if flashblock.index == 0 { + // Base flashblock received + self.has_base = true; + } - const fn revision(&self) -> u64 { - self.revision + // Only recover transactions once we've validated that this flashblock is accepted. + let flashblock_index = flashblock.index; + let recovered_txs = flashblock.recover_transactions().collect::, _>>()?; + self.inner.insert(flashblock_index, flashblock); + self.recovered_transactions_by_index.insert(flashblock_index, recovered_txs); + self.bump_revision(); + Ok(()) } - fn clear(&mut self) { - self.sequence = FlashBlockPendingSequence::new(); - self.recovered_transactions_by_index.clear(); - self.applied_revision = None; + /// Returns whether this flashblock would be accepted into the current sequence. + fn can_accept(&self, flashblock: &OpFlashblockPayload) -> bool { + if flashblock.index == 0 && !self.has_base { + return true; + } + return self.block_number() == Some(flashblock.block_number()) + && self.payload_id() == Some(flashblock.payload_id); } - const fn bump_revision(&mut self) { - self.revision = self.revision.wrapping_add(1); + /// Returns the first block number + pub fn block_number(&self) -> Option { + Some(self.inner.values().next()?.block_number()) } - fn is_revision_applied(&self, revision: u64) -> bool { - self.applied_revision == Some(revision) + /// Returns the payload id of the first tracked flashblock in the current sequence. + pub fn payload_id(&self) -> Option { + Some(self.inner.values().next()?.payload_id) } - const fn mark_revision_applied(&mut self, revision: u64) { - self.applied_revision = Some(revision); + fn count(&self) -> usize { + self.inner.len() } - fn insert_flashblock(&mut self, flashblock: FlashBlock) -> eyre::Result<()> { - if !self.sequence.can_accept(&flashblock) { - self.sequence.insert(flashblock); - return Ok(()); - } + const fn revision(&self) -> u64 { + self.revision + } - // Only recover transactions once we've validated that this flashblock is accepted. - let recovered_txs = flashblock.recover_transactions().collect::, _>>()?; - let flashblock_index = flashblock.index; + fn bump_revision(&mut self) { + // Iterate over the inner map and increment the revision for consecutive flashblocks + let mut new_revision = 0; + for (index, _) in self.inner.iter() { + if *index == 0 { + continue; + } - // Index 0 starts a fresh pending block, so clear any stale in-progress data. - if flashblock_index == 0 { - self.clear(); + // If the index is not consecutive, break the loop + if new_revision != *index - 1 { + break; + } + new_revision = *index; } - - self.sequence.insert(flashblock); - self.recovered_transactions_by_index.insert(flashblock_index, recovered_txs); - self.bump_revision(); - Ok(()) + self.revision = new_revision; } - fn finalize( - &mut self, - ) -> eyre::Result<(FlashBlockCompleteSequence, Vec>>)> { - let finalized = self.sequence.finalize(); - let recovered_by_index = std::mem::take(&mut self.recovered_transactions_by_index); - - match finalized { - Ok(completed) => Ok((completed, recovered_by_index.into_values().flatten().collect())), - Err(err) => Err(err), - } + const fn mark_revision_applied(&mut self, revision: u64) { + self.applied_revision = Some(revision); } fn transactions(&self) -> Vec>> { @@ -218,182 +121,31 @@ impl PendingSequence { } } -/// Manages flashblock sequences with caching support. +type RawFlashblocksCacheInner = + BTreeMap>; + +/// The raw flashblocks sequence cache for new incoming flashblocks from the sequencer. +/// The cache accumulates last two flashblocks sequences in memory, to handle scenario +/// when flashblocks received are out-of-order, and committing the previous sequence +/// state to the state cache is not yet possible due to parent hash mismatch (we still +/// need the previous flashblocks sequence to compute the state root). /// -/// This struct handles: -/// - Tracking the current pending sequence -/// - Caching completed sequences in a fixed-size ring buffer -/// - Finding the best sequence to build based on local chain tip -/// - Broadcasting completed sequences to subscribers -#[derive(Debug)] -pub(crate) struct SequenceManager { - /// Current pending sequence being built up from incoming flashblocks - pending: PendingSequence, - /// Ring buffer of recently completed sequences bundled with their decoded transactions (FIFO, - /// size 3) - completed_cache: AllocRingBuffer<(FlashBlockCompleteSequence, Vec>>)>, - /// Cached sequence identities that already had a build completion applied. - applied_cached_sequences: HashSet, - /// Cached minimum block number currently present in `completed_cache`. - cached_min_block_number: Option, - /// Broadcast channel for completed sequences - block_broadcaster: broadcast::Sender, - /// Whether to compute state roots when building blocks - compute_state_root: bool, +/// The raw cache is used to: +/// 1. Track the next best sequence to build, based on cache state (consecutive flashblocks +/// required) +/// 2. Re-org detection when a new flashblock is received +pub struct RawFlashblocksCache { + inner: Arc>>, } -impl SequenceManager { - /// Creates a new sequence manager. - pub(crate) fn new(compute_state_root: bool) -> Self { - let (block_broadcaster, _) = broadcast::channel(128); - Self { - pending: PendingSequence::new(), - completed_cache: AllocRingBuffer::new(CACHE_SIZE), - applied_cached_sequences: HashSet::new(), - cached_min_block_number: None, - block_broadcaster, - compute_state_root, - } - } - - /// Returns the sender half of the flashblock sequence broadcast channel. - pub(crate) const fn block_sequence_broadcaster( - &self, - ) -> &broadcast::Sender { - &self.block_broadcaster - } - - /// Gets a subscriber to the flashblock sequences produced. - pub(crate) fn subscribe_block_sequence(&self) -> crate::FlashBlockCompleteSequenceRx { - self.block_broadcaster.subscribe() - } - - /// Inserts a new flashblock into the pending sequence. - /// - /// When a flashblock with index 0 arrives (indicating a new block), the current - /// pending sequence is finalized, cached, and broadcast immediately. If the sequence - /// is later built on top of local tip, `on_build_complete()` will broadcast again - /// with computed `state_root`. - /// - /// Transactions are recovered once and cached for reuse during block building. - pub(crate) fn insert_flashblock(&mut self, flashblock: FlashBlock) -> eyre::Result<()> { - // If this starts a new block, finalize and cache the previous sequence BEFORE inserting - if flashblock.index == 0 && self.pending.count() > 0 { - let (completed, txs) = self.pending.finalize()?; - let block_number = completed.block_number(); - let parent_hash = completed.payload_base().parent_hash; - - trace!( - target: "flashblocks", - block_number, - %parent_hash, - cache_size = self.completed_cache.len(), - "Caching completed flashblock sequence" - ); - - // Broadcast immediately to consensus client (even without state_root) - // This ensures sequences are forwarded during catch-up even if not buildable on tip. - // ConsensusClient checks execution_outcome and skips newPayload if state_root is zero. - if self.block_broadcaster.receiver_count() > 0 { - let _ = self.block_broadcaster.send(completed.clone()); - } - - // Bundle completed sequence with its decoded transactions and push to cache - // Ring buffer automatically evicts oldest entry when full - self.push_completed_sequence(completed, txs); - } - - self.pending.insert_flashblock(flashblock)?; - Ok(()) - } - - /// Pushes a completed sequence into the cache and maintains cached min block-number metadata. - fn push_completed_sequence( - &mut self, - completed: FlashBlockCompleteSequence, - txs: Vec>>, - ) { - let block_number = completed.block_number(); - let completed_sequence_id = SequenceId::from_complete(&completed); - let evicted_block_number = if self.completed_cache.is_full() { - self.completed_cache.front().map(|(seq, _)| seq.block_number()) - } else { - None - }; - let evicted_sequence_id = if self.completed_cache.is_full() { - self.completed_cache.front().map(|(seq, _)| SequenceId::from_complete(seq)) - } else { - None - }; - - if let Some(sequence_id) = evicted_sequence_id { - self.applied_cached_sequences.remove(&sequence_id); - } - // Re-tracking a sequence identity should always start as unapplied. - self.applied_cached_sequences.remove(&completed_sequence_id); - - self.completed_cache.enqueue((completed, txs)); - - self.cached_min_block_number = match self.cached_min_block_number { - None => Some(block_number), - Some(current_min) if block_number < current_min => Some(block_number), - Some(current_min) if Some(current_min) == evicted_block_number => { - self.recompute_cache_min_block_number() - } - Some(current_min) => Some(current_min), - }; - } - - /// Recomputes the minimum block number in `completed_cache`. - fn recompute_cache_min_block_number(&self) -> Option { - self.completed_cache.iter().map(|(seq, _)| seq.block_number()).min() - } - - /// Returns the newest cached sequence that matches `parent_hash` and still needs execution. - /// - /// Cached sequences that already had build completion applied are skipped to avoid redundant - /// rebuild loops. - fn newest_unexecuted_cached_for_parent( - &self, - parent_hash: B256, - ) -> Option<&(FlashBlockCompleteSequence, Vec>>)> { - self.completed_cache.iter().rev().find(|(seq, _)| { - let sequence_id = SequenceId::from_complete(seq); - seq.payload_base().parent_hash == parent_hash - && !self.applied_cached_sequences.contains(&sequence_id) - }) - } - - /// Returns a mutable cached sequence entry by exact sequence identity. - fn cached_entry_mut_by_id( - &mut self, - sequence_id: SequenceId, - ) -> Option<&mut (FlashBlockCompleteSequence, Vec>>)> { - self.completed_cache - .iter_mut() - .find(|(seq, _)| SequenceId::from_complete(seq) == sequence_id) - } - - /// Returns the current pending sequence for inspection. - pub(crate) const fn pending(&self) -> &FlashBlockPendingSequence { - self.pending.sequence() - } - - /// Finds the next sequence to build and returns the selected sequence identity - /// with ready-to-use `BuildArgs`. - /// - /// Priority order: - /// 1. Current pending sequence (if parent matches local tip) - /// 2. Cached sequence with exact parent match - /// 3. Speculative: pending sequence with pending parent state (if provided) - /// - /// Returns None if nothing is buildable right now. +impl RawFlashblocksCache { + /// Gets the next buildable sequence from the cache, returns None if no buildable + /// sequence is found. pub(crate) fn next_buildable_args>( &mut self, local_tip_hash: B256, local_tip_timestamp: u64, - pending_parent_state: Option>, - ) -> Option>>, N>> { + ) -> Option>>, N>> { // Try to find a buildable sequence: (ticket, base, last_fb, transactions, // cached_state, source_name, pending_parent) let (ticket, base, last_flashblock, transactions, cached_state, source_name, pending_parent) = diff --git a/crates/flashblocks/src/cache/state/block.rs b/crates/flashblocks/src/cache/state/block.rs new file mode 100644 index 00000000..ba0f1447 --- /dev/null +++ b/crates/flashblocks/src/cache/state/block.rs @@ -0,0 +1,160 @@ +use super::{ + utils::{block_from_bar, StateCacheProvider}, + StateCache, +}; + +use alloy_eips::{BlockHashOrNumber, BlockId}; +use alloy_primitives::{BlockNumber, TxNumber, B256}; +use core::ops::RangeInclusive; +use reth_db_models::StoredBlockBodyIndices; +use reth_primitives_traits::{BlockTy, NodePrimitives, RecoveredBlock, SealedHeader}; +use reth_storage_api::{ + errors::provider::ProviderResult, BlockBodyIndicesProvider, BlockReader, BlockReaderIdExt, + BlockSource, HeaderProvider, TransactionVariant, +}; + +impl> BlockReader for StateCache { + type Block = BlockTy; + + fn find_block_by_hash( + &self, + hash: B256, + source: BlockSource, + ) -> ProviderResult> { + if let Some(bar) = self.inner.read().confirm_cache.get_block_by_hash(&hash) { + return Ok(Some(block_from_bar(&bar))); + } + self.provider.find_block_by_hash(hash, source) + } + + fn block(&self, id: BlockHashOrNumber) -> ProviderResult> { + let cached = match id { + BlockHashOrNumber::Hash(hash) => { + self.inner.read().confirm_cache.get_block_by_hash(&hash) + } + BlockHashOrNumber::Number(num) => { + self.inner.read().confirm_cache.get_block_by_number(num) + } + }; + if let Some(bar) = cached { + return Ok(Some(block_from_bar(&bar))); + } + self.provider.block(id) + } + + fn pending_block(&self) -> ProviderResult>> { + { + let inner = self.inner.read(); + if let Some(pending) = &inner.pending { + return Ok(Some(pending.pending.block().as_ref().clone())); + } + } + self.provider.pending_block() + } + + fn pending_block_and_receipts( + &self, + ) -> ProviderResult, Vec)>> { + { + let inner = self.inner.read(); + if let Some(pending) = &inner.pending { + let block = pending.pending.block().as_ref().clone(); + let receipts = pending.pending.receipts.as_ref().clone(); + return Ok(Some((block, receipts))); + } + } + self.provider.pending_block_and_receipts() + } + + fn recovered_block( + &self, + id: BlockHashOrNumber, + transaction_kind: TransactionVariant, + ) -> ProviderResult>> { + let cached = match id { + BlockHashOrNumber::Hash(hash) => { + self.inner.read().confirm_cache.get_block_by_hash(&hash) + } + BlockHashOrNumber::Number(num) => { + self.inner.read().confirm_cache.get_block_by_number(num) + } + }; + if let Some(bar) = cached { + return Ok(Some((*bar.block).clone())); + } + self.provider.recovered_block(id, transaction_kind) + } + + fn sealed_block_with_senders( + &self, + id: BlockHashOrNumber, + transaction_kind: TransactionVariant, + ) -> ProviderResult>> { + self.recovered_block(id, transaction_kind) + } + + fn block_range(&self, range: RangeInclusive) -> ProviderResult> { + self.provider.block_range(range) + } + + fn block_with_senders_range( + &self, + range: RangeInclusive, + ) -> ProviderResult>> { + self.provider.block_with_senders_range(range) + } + + fn recovered_block_range( + &self, + range: RangeInclusive, + ) -> ProviderResult>> { + self.provider.recovered_block_range(range) + } + + fn block_by_transaction_id(&self, id: TxNumber) -> ProviderResult> { + self.provider.block_by_transaction_id(id) + } +} + +impl> BlockReaderIdExt + for StateCache +{ + fn block_by_id(&self, id: BlockId) -> ProviderResult> { + match id { + BlockId::Hash(hash) => self.block_by_hash(hash.into()), + BlockId::Number(num) => self.block_by_number_or_tag(num), + } + } + + fn sealed_header_by_id( + &self, + id: BlockId, + ) -> ProviderResult>> { + match id { + BlockId::Hash(hash) => self.sealed_header_by_hash(hash.into()), + BlockId::Number(tag) => self.sealed_header_by_number_or_tag(tag), + } + } + + fn header_by_id(&self, id: BlockId) -> ProviderResult> { + match id { + BlockId::Hash(hash) => self.header(hash.into()), + BlockId::Number(num) => self.header_by_number_or_tag(num), + } + } +} + +impl> BlockBodyIndicesProvider + for StateCache +{ + fn block_body_indices(&self, num: u64) -> ProviderResult> { + self.provider.block_body_indices(num) + } + + fn block_body_indices_range( + &self, + range: RangeInclusive, + ) -> ProviderResult> { + self.provider.block_body_indices_range(range) + } +} diff --git a/crates/flashblocks/src/cache/state/factory.rs b/crates/flashblocks/src/cache/state/factory.rs new file mode 100644 index 00000000..217a0641 --- /dev/null +++ b/crates/flashblocks/src/cache/state/factory.rs @@ -0,0 +1,77 @@ +use super::{utils::StateCacheProvider, StateCache}; + +use alloy_eips::BlockNumberOrTag; +use alloy_primitives::{BlockNumber, B256}; +use reth_primitives_traits::NodePrimitives; +use reth_storage_api::{errors::provider::ProviderResult, StateProviderBox, StateProviderFactory}; + +impl> StateProviderFactory + for StateCache +{ + fn latest(&self) -> ProviderResult { + // Determine effective latest: if confirm cache is strictly ahead of the + // provider's best block, use the confirmed block's hash to resolve state + // from the engine tree. Otherwise, use the provider's latest. + let provider_best = self.provider.best_block_number()?; + let inner = self.inner.read(); + if let Some(confirm_height) = inner.confirm_height { + if confirm_height > provider_best { + if let Some(hash) = inner.confirm_cache.hash_for_number(confirm_height) { + drop(inner); + return self.provider.state_by_block_hash(hash); + } + } + } + drop(inner); + self.provider.latest() + } + + fn state_by_block_number_or_tag( + &self, + number_or_tag: BlockNumberOrTag, + ) -> ProviderResult { + match number_or_tag { + BlockNumberOrTag::Latest => self.latest(), + BlockNumberOrTag::Pending => self.pending(), + other => self.provider.state_by_block_number_or_tag(other), + } + } + + fn history_by_block_number(&self, block: BlockNumber) -> ProviderResult { + // If the requested block is in the confirm cache (ahead of canonical), + // resolve via hash so the engine tree can serve it. + if let Some(hash) = self.inner.read().confirm_cache.hash_for_number(block) { + return self.provider.state_by_block_hash(hash); + } + self.provider.history_by_block_number(block) + } + + fn history_by_block_hash(&self, block: B256) -> ProviderResult { + // If the hash is in our confirm cache, route through `state_by_block_hash` + // which also covers the engine tree's in-memory state. + if self.inner.read().confirm_cache.contains_hash(&block) { + return self.provider.state_by_block_hash(block); + } + self.provider.history_by_block_hash(block) + } + + fn state_by_block_hash(&self, block: B256) -> ProviderResult { + self.provider.state_by_block_hash(block) + } + + fn pending(&self) -> ProviderResult { + // Delegate to the underlying provider. The engine tree should have the + // pending block's world state if it has been submitted via engine API. + // Building a custom state overlay from the `PendingSequence`'s + // `ExecutedBlock` is a future enhancement. + self.provider.pending() + } + + fn pending_state_by_hash(&self, block_hash: B256) -> ProviderResult> { + self.provider.pending_state_by_hash(block_hash) + } + + fn maybe_pending(&self) -> ProviderResult> { + self.provider.maybe_pending() + } +} diff --git a/crates/flashblocks/src/cache/state/header.rs b/crates/flashblocks/src/cache/state/header.rs new file mode 100644 index 00000000..2ff52b0e --- /dev/null +++ b/crates/flashblocks/src/cache/state/header.rs @@ -0,0 +1,89 @@ +use super::{utils::StateCacheProvider, StateCache}; + +use alloy_primitives::{BlockNumber, B256}; +use core::ops::RangeBounds; +use reth_primitives_traits::{HeaderTy, NodePrimitives, SealedHeader}; +use reth_storage_api::{errors::provider::ProviderResult, HeaderProvider}; + +impl> HeaderProvider + for StateCache +{ + type Header = HeaderTy; + + fn header(&self, block_hash: B256) -> ProviderResult> { + if let Some(bar) = self.inner.read().confirm_cache.get_block_by_hash(&block_hash) { + return Ok(Some(bar.block.header().clone())); + } + // Cache miss, delegate to the provider + self.provider.header(block_hash) + } + + fn header_by_number(&self, num: u64) -> ProviderResult> { + if let Some(bar) = self.inner.read().confirm_cache.get_block_by_number(num) { + return Ok(Some(bar.block.header().clone())); + } + // Cache miss, delegate to the provider + self.provider.header_by_number(num) + } + + fn headers_range( + &self, + range: impl RangeBounds, + ) -> ProviderResult> { + let (start, end) = self.resolve_range_bounds(range)?; + if start > end { + return Ok(Vec::new()); + } + + let inner = self.inner.read(); + + // Walk backwards from the end of the inclusive range, collecting + // consecutive cache hits from the confirm cache tail. + let mut cache_headers = Vec::new(); + let mut provider_end = end; + let mut index = end; + loop { + if let Some(bar) = inner.confirm_cache.get_block_by_number(index) { + cache_headers.push(bar.block.header().clone()); + provider_end = index.saturating_sub(1); + } else { + break; + } + if index == start { + break; + } + index -= 1; + } + cache_headers.reverse(); + drop(inner); + + // Delegate the remaining prefix [start..=provider_end] to the provider. + let mut result = + if provider_end >= start && cache_headers.len() < (end - start + 1) as usize { + self.provider.headers_range(start..=provider_end)? + } else { + Vec::new() + }; + result.extend(cache_headers); + Ok(result) + } + + fn sealed_header( + &self, + number: BlockNumber, + ) -> ProviderResult>> { + if let Some(bar) = self.inner.read().confirm_cache.get_block_by_number(number) { + return Ok(Some(bar.block.sealed_header().clone())); + } + // Cache miss, delegate to the provider + self.provider.sealed_header(number) + } + + fn sealed_headers_while( + &self, + range: impl RangeBounds, + predicate: impl FnMut(&SealedHeader) -> bool, + ) -> ProviderResult>> { + self.provider.sealed_headers_while(range, predicate) + } +} diff --git a/crates/flashblocks/src/cache/state/id.rs b/crates/flashblocks/src/cache/state/id.rs new file mode 100644 index 00000000..e647959c --- /dev/null +++ b/crates/flashblocks/src/cache/state/id.rs @@ -0,0 +1,119 @@ +use super::{utils::StateCacheProvider, StateCache}; + +use alloy_consensus::BlockHeader; +use alloy_eips::BlockNumHash; +use alloy_primitives::{BlockNumber, B256}; +use reth_chainspec::ChainInfo; +use reth_primitives_traits::NodePrimitives; +use reth_storage_api::{ + errors::provider::ProviderResult, BlockHashReader, BlockIdReader, BlockNumReader, +}; + +impl> BlockHashReader + for StateCache +{ + fn block_hash(&self, number: BlockNumber) -> ProviderResult> { + if let Some(hash) = self.inner.read().confirm_cache.hash_for_number(number) { + return Ok(Some(hash)); + } + // Cache miss, delegate to the provider + self.provider.block_hash(number) + } + + fn canonical_hashes_range( + &self, + start: BlockNumber, + end: BlockNumber, + ) -> ProviderResult> { + if start >= end { + // Aligns with underlying blockchain provider + return Ok(Vec::new()); + } + + let inner = self.inner.read(); + let mut cache_hashes = Vec::new(); + let mut provider_end = end; + let mut index = end - 1; + loop { + if let Some(hash) = inner.confirm_cache.hash_for_number(index) { + cache_hashes.push(hash); + provider_end = index; + } else { + break; + } + // Guard against underflow when index == 0, and stop once we've + // covered the full range down to `start` + if index == start { + break; + } + index -= 1; + } + cache_hashes.reverse(); + drop(inner); + + // Delegate the remaining prefix [start, provider_end) to the provider + let mut result = if provider_end > start { + self.provider.canonical_hashes_range(start, provider_end)? + } else { + Vec::new() + }; + result.extend(cache_hashes); + Ok(result) + } +} + +impl> BlockNumReader + for StateCache +{ + fn chain_info(&self) -> ProviderResult { + let mut info = self.provider.chain_info()?; + let inner = self.inner.read(); + if let Some(h) = inner.confirm_height + && h > info.best_number + && let Some(hash) = inner.confirm_cache.hash_for_number(h) + { + info.best_number = h; + info.best_hash = hash; + } + Ok(info) + } + + fn best_block_number(&self) -> ProviderResult { + let provider_height = self.provider.best_block_number()?; + // If confirm cache is strictly ahead, report that. On tie, prefer provider + Ok(self.inner.read().confirm_height.map_or(provider_height, |h| h.max(provider_height))) + } + + fn last_block_number(&self) -> ProviderResult { + self.provider.last_block_number() + } + + fn block_number(&self, hash: B256) -> ProviderResult> { + if let Some(num) = self.inner.read().confirm_cache.number_for_hash(&hash) { + return Ok(Some(num)); + } + // Cache miss, delegate to the provider + self.provider.block_number(hash) + } +} + +impl> BlockIdReader for StateCache { + fn pending_block_num_hash(&self) -> ProviderResult> { + let inner = self.inner.read(); + if let Some(pending) = &inner.pending { + let block = pending.pending.block(); + return Ok(Some(BlockNumHash::new(block.number(), block.hash()))); + } + drop(inner); + // Cache miss, delegate to the provider + self.provider.pending_block_num_hash() + } + + fn safe_block_num_hash(&self) -> ProviderResult> { + self.provider.safe_block_num_hash() + } + + fn finalized_block_num_hash(&self) -> ProviderResult> { + self.provider.finalized_block_num_hash() + } +} diff --git a/crates/flashblocks/src/cache/state.rs b/crates/flashblocks/src/cache/state/mod.rs similarity index 55% rename from crates/flashblocks/src/cache/state.rs rename to crates/flashblocks/src/cache/state/mod.rs index 7d4634d5..a2544a8d 100644 --- a/crates/flashblocks/src/cache/state.rs +++ b/crates/flashblocks/src/cache/state/mod.rs @@ -1,11 +1,22 @@ -use crate::cache::{confirm::ConfirmCache, pending::PendingSequence}; +mod block; +mod factory; +mod header; +mod id; +mod receipt; +mod transaction; +pub(crate) mod utils; + +use crate::cache::{ConfirmCache, PendingSequence}; use parking_lot::RwLock; use std::sync::Arc; +use utils::StateCacheProvider; use alloy_consensus::BlockHeader; -use alloy_primitives::B256; +use alloy_primitives::{BlockNumber, B256}; +use core::ops::RangeBounds; use reth_primitives_traits::NodePrimitives; use reth_rpc_eth_types::block::BlockAndReceipts; +use reth_storage_api::{errors::provider::ProviderResult, BlockNumReader}; /// Top-level controller state cache for the flashblocks RPC layer. /// @@ -15,18 +26,39 @@ use reth_rpc_eth_types::block::BlockAndReceipts; /// - **Confirmed**: completed flashblock sequences that have been committed but /// are still ahead of the canonical chain. /// +/// Implements all reth provider traits using the flashblocks state cache layer +/// as an overlay on top of the underlying chainstate `Provider`. +/// (`BlockReaderIdExt`, `StateProviderFactory`, etc.) +/// +/// **Lookup strategy:** +/// - **Confirmed state** (by hash/number): Check the flashblocks state cache +/// layer first, then fall back to the chainstate provider. +/// - **Latest**: Compare the flashblocks state cache's highest height vs the +/// chainstate provider's best height. Return whichever is higher, on tie we +/// prefer the chainstate provider. +/// - **Pending**: Returns the pending state from the flashblocks state cache. +/// - **All other IDs** (safe, finalized, historical, index-based): delegate +/// directly to the chainstate provider. +/// /// Uses `Arc` for thread safety — a single lock protects all inner /// state, ensuring atomic operations across pending, confirmed, and height /// state (e.g. reorg detection + flush + insert in `handle_confirmed_block`). #[derive(Debug, Clone)] -pub struct StateCache { +pub struct StateCache { inner: Arc>>, + provider: Provider, } -impl StateCache { +impl> StateCache { /// Creates a new [`StateCache`]. - pub fn new(canon_height: u64) -> Self { - Self { inner: Arc::new(RwLock::new(StateCacheInner::new(canon_height))) } + pub fn new(provider: Provider) -> eyre::Result { + let canon_height = provider.best_block_number()?; + Ok(Self { inner: Arc::new(RwLock::new(StateCacheInner::new(canon_height))), provider }) + } + + /// Returns a reference to the underlying chainstate provider. + pub const fn provider(&self) -> &Provider { + &self.provider } /// Handles a newly confirmed block by detecting reorgs, flushing invalidated @@ -59,6 +91,26 @@ impl StateCache { pub fn get_pending_height(&self) -> Option { self.inner.read().pending.as_ref().map(|p| p.pending.block().number()) } + + /// Resolves an `impl RangeBounds` into an inclusive `(start, end)` pair. + /// Matches reth's blockchain provider's convert_range_bounds semantics, and unbounded + /// ends are resolved to `best_block_number`. + fn resolve_range_bounds( + &self, + range: impl RangeBounds, + ) -> ProviderResult<(BlockNumber, BlockNumber)> { + let start = match range.start_bound() { + core::ops::Bound::Included(&n) => n, + core::ops::Bound::Excluded(&n) => n + 1, + core::ops::Bound::Unbounded => 0, + }; + let end = match range.end_bound() { + core::ops::Bound::Included(&n) => n, + core::ops::Bound::Excluded(&n) => n - 1, + core::ops::Bound::Unbounded => self.best_block_number()?, + }; + Ok((start, end)) + } } /// Inner state of the flashblocks state cache. @@ -91,22 +143,24 @@ impl StateCacheInner { block_hash: B256, block: BlockAndReceipts, ) -> eyre::Result<()> { + // Validation checks if let Some(confirm_height) = self.confirm_height { - // Reorg detection: incoming block is at or behind the last confirmed height. if block_number <= confirm_height { - self.confirm_cache.flush_from(block_number); + // Reorg detected - confirm cache is polluted + return Err(eyre::eyre!( + "polluted state cache - trying to commit lower confirm height block" + )); + } + if block_number != confirm_height + 1 { + return Err(eyre::eyre!( + "polluted state cache - not next consecutive confirm height block" + )); } } - self.confirm_cache.insert(block_number, block_hash, block)?; - - // Sanity check: the inserted block must now be the highest in the cache + // Commit new confirmed block to state cache self.confirm_height = Some(block_number); - if self.confirm_height != self.confirm_cache.latest_block_number() { - return Err(eyre::eyre!( - "confirmed cache latest height mismatch inserted block height: {block_number}" - )); - } + self.confirm_cache.insert(block_number, block_hash, block)?; Ok(()) } @@ -117,7 +171,7 @@ impl StateCacheInner { fn handle_canonical_block(&mut self, block_number: u64, block_hash: B256) { self.canon_height = block_number; self.confirm_cache.flush_up_to(block_number); - if self.pending.as_ref().map(|p| p.block_hash) == Some(block_hash) { + if self.pending.as_ref().and_then(|p| p.block_hash) == Some(block_hash) { self.pending = None; } } diff --git a/crates/flashblocks/src/cache/state/receipt.rs b/crates/flashblocks/src/cache/state/receipt.rs new file mode 100644 index 00000000..c81c6aaf --- /dev/null +++ b/crates/flashblocks/src/cache/state/receipt.rs @@ -0,0 +1,58 @@ +use super::{utils::StateCacheProvider, StateCache}; + +use alloy_eips::BlockHashOrNumber; +use alloy_primitives::{BlockNumber, TxHash, TxNumber}; +use core::ops::{RangeBounds, RangeInclusive}; +use reth_primitives_traits::{NodePrimitives, ReceiptTy}; +use reth_storage_api::{errors::provider::ProviderResult, ReceiptProvider, ReceiptProviderIdExt}; + +impl> ReceiptProvider + for StateCache +{ + type Receipt = ReceiptTy; + + fn receipt(&self, id: TxNumber) -> ProviderResult> { + self.provider.receipt(id) + } + + fn receipt_by_hash(&self, hash: TxHash) -> ProviderResult> { + self.provider.receipt_by_hash(hash) + } + + fn receipts_by_block( + &self, + block: BlockHashOrNumber, + ) -> ProviderResult>> { + let cached = match block { + BlockHashOrNumber::Hash(hash) => { + self.inner.read().confirm_cache.get_block_by_hash(&hash) + } + BlockHashOrNumber::Number(num) => { + self.inner.read().confirm_cache.get_block_by_number(num) + } + }; + if let Some(bar) = cached { + return Ok(Some((*bar.receipts).clone())); + } + self.provider.receipts_by_block(block) + } + + fn receipts_by_tx_range( + &self, + range: impl RangeBounds, + ) -> ProviderResult> { + self.provider.receipts_by_tx_range(range) + } + + fn receipts_by_block_range( + &self, + block_range: RangeInclusive, + ) -> ProviderResult>> { + self.provider.receipts_by_block_range(block_range) + } +} + +impl> ReceiptProviderIdExt + for StateCache +{ +} diff --git a/crates/flashblocks/src/cache/state/transaction.rs b/crates/flashblocks/src/cache/state/transaction.rs new file mode 100644 index 00000000..f2f9825a --- /dev/null +++ b/crates/flashblocks/src/cache/state/transaction.rs @@ -0,0 +1,82 @@ +use super::{utils::StateCacheProvider, StateCache}; + +use alloy_eips::BlockHashOrNumber; +use alloy_primitives::{Address, BlockNumber, TxHash, TxNumber}; +use core::ops::RangeBounds; +use reth_primitives_traits::{BlockBody, NodePrimitives, TransactionMeta}; +use reth_storage_api::{errors::provider::ProviderResult, TransactionsProvider}; + +impl> TransactionsProvider + for StateCache +{ + type Transaction = N::SignedTx; + + fn transaction_id(&self, tx_hash: TxHash) -> ProviderResult> { + self.provider.transaction_id(tx_hash) + } + + fn transaction_by_id(&self, id: TxNumber) -> ProviderResult> { + self.provider.transaction_by_id(id) + } + + fn transaction_by_id_unhashed( + &self, + id: TxNumber, + ) -> ProviderResult> { + self.provider.transaction_by_id_unhashed(id) + } + + fn transaction_by_hash(&self, hash: TxHash) -> ProviderResult> { + self.provider.transaction_by_hash(hash) + } + + fn transaction_by_hash_with_meta( + &self, + hash: TxHash, + ) -> ProviderResult> { + self.provider.transaction_by_hash_with_meta(hash) + } + + fn transactions_by_block( + &self, + block: BlockHashOrNumber, + ) -> ProviderResult>> { + let cached = match block { + BlockHashOrNumber::Hash(hash) => { + self.inner.read().confirm_cache.get_block_by_hash(&hash) + } + BlockHashOrNumber::Number(num) => { + self.inner.read().confirm_cache.get_block_by_number(num) + } + }; + if let Some(bar) = cached { + return Ok(Some(bar.block.body().transactions().to_vec())); + } + self.provider.transactions_by_block(block) + } + + fn transactions_by_block_range( + &self, + range: impl RangeBounds, + ) -> ProviderResult>> { + self.provider.transactions_by_block_range(range) + } + + fn transactions_by_tx_range( + &self, + range: impl RangeBounds, + ) -> ProviderResult> { + self.provider.transactions_by_tx_range(range) + } + + fn senders_by_tx_range( + &self, + range: impl RangeBounds, + ) -> ProviderResult> { + self.provider.senders_by_tx_range(range) + } + + fn transaction_sender(&self, id: TxNumber) -> ProviderResult> { + self.provider.transaction_sender(id) + } +} diff --git a/crates/flashblocks/src/cache/state/utils.rs b/crates/flashblocks/src/cache/state/utils.rs new file mode 100644 index 00000000..d900269d --- /dev/null +++ b/crates/flashblocks/src/cache/state/utils.rs @@ -0,0 +1,34 @@ +use reth_primitives_traits::{Block, BlockTy, HeaderTy, NodePrimitives, ReceiptTy}; +use reth_rpc_eth_types::block::BlockAndReceipts; +use reth_storage_api::{BlockReaderIdExt, StateProviderFactory}; + +/// Provider trait bound alias used throughout the `StateCache` implementation. +/// +/// The provider must implement the full reth block reader + state provider stack. +pub(crate) trait StateCacheProvider: + StateProviderFactory + + BlockReaderIdExt< + Header = HeaderTy, + Block = BlockTy, + Transaction = N::SignedTx, + Receipt = ReceiptTy, + > + Unpin +{ +} + +impl StateCacheProvider for P +where + N: NodePrimitives, + P: StateProviderFactory + + BlockReaderIdExt< + Header = HeaderTy, + Block = BlockTy, + Transaction = N::SignedTx, + Receipt = ReceiptTy, + > + Unpin, +{ +} + +pub(crate) fn block_from_bar(bar: &BlockAndReceipts) -> BlockTy { + BlockTy::::new(bar.block.header().clone(), bar.block.body().clone()) +} diff --git a/crates/flashblocks/src/execution/cache.rs b/crates/flashblocks/src/execution/cache.rs index 93b744f5..2fbdd5d6 100644 --- a/crates/flashblocks/src/execution/cache.rs +++ b/crates/flashblocks/src/execution/cache.rs @@ -193,11 +193,12 @@ impl TransactionCache { .count() } - /// Returns cached state for resuming execution if the incoming transactions - /// have a matching prefix with the cache. + /// Returns cached state for resuming execution if the incoming transactions have a + /// matching prefix with the cache. /// - /// Returns `Some((bundle, receipts, requests, gas_used, blob_gas_used, skip_count))` if - /// there's a non-empty matching prefix, where: + /// Returns `Some((bundle, receipts, requests, gas_used, blob_gas_used, skip_count))` + /// if there's a non-empty matching prefix, and the full cache matches the incoming + /// prefix, where: /// - `bundle` is the cumulative state after the matching prefix /// - `receipts` is the receipts for the matching prefix /// - `skip_count` is the number of transactions to skip @@ -233,13 +234,13 @@ impl TransactionCache { )) } - /// Returns cached state and execution metadata for resuming execution if the incoming - /// transactions have a matching prefix with the cache and the parent hash matches. + /// Returns cached state for resuming execution if the incoming transactions have a + /// matching prefix with the cache and the parent hash matches. /// - /// Returns `Some((bundle, receipts, requests, gas_used, blob_gas_used, skip_count))` if - /// there's a non-empty matching prefix, the full cache matches the incoming prefix, and the + /// Returns `Some((bundle, receipts, requests, gas_used, blob_gas_used, skip_count))` + /// if there's a non-empty matching prefix, where the full cache matches the incoming prefix, and the /// `(block_number, parent_hash)` tuple matches the cached scope. - pub(crate) fn get_resumable_state_with_execution_meta_for_parent( + pub(crate) fn get_resumable_state_for_parent( &self, block_number: u64, parent_hash: B256, @@ -494,7 +495,7 @@ mod tests { let fb1_txs = vec![tx_a, tx_b]; let result = cache.get_resumable_state(100, &fb1_txs); assert!(result.is_some()); - assert_eq!(result.unwrap().5, 1); // 1 tx covered by cache + assert_eq!(result.unwrap().2, 1); // 1 tx covered by cache cache.update(100, fb1_txs, BundleState::default(), vec![]); assert_eq!(cache.len(), 2); diff --git a/crates/flashblocks/src/execution/mod.rs b/crates/flashblocks/src/execution/mod.rs index 4b0e11bd..751b3fd3 100644 --- a/crates/flashblocks/src/execution/mod.rs +++ b/crates/flashblocks/src/execution/mod.rs @@ -1,4 +1,5 @@ mod cache; -pub(crate) mod worker; +use cache::{CachedExecutionMeta, TransactionCache}; -pub use worker::FlashblockCachedReceipt; +pub(crate) mod worker; +pub use worker::{BuildArgs, BuildResult, FlashblockCachedReceipt}; diff --git a/crates/flashblocks/src/execution/worker.rs b/crates/flashblocks/src/execution/worker.rs index 32877e09..dc3aa394 100644 --- a/crates/flashblocks/src/execution/worker.rs +++ b/crates/flashblocks/src/execution/worker.rs @@ -1,12 +1,12 @@ use crate::{ - execution::cache::{CachedExecutionMeta, TransactionCache}, - types::pending_state::PendingBlockState, - PendingFlashBlock, + cache::{PendingSequence, StateCache}, + execution::{CachedExecutionMeta, TransactionCache}, }; use std::{ sync::Arc, time::{Duration, Instant}, }; +use tokio_util::sync::CancellationToken; use tracing::trace; use alloy_eips::{eip2718::WithEncoded, BlockNumberOrTag}; @@ -38,19 +38,31 @@ use reth_storage_api::{ StateRootProvider, }; -/// The `FlashBlockBuilder` builds [`PendingBlock`] out of a sequence of transactions. +pub(crate) struct BuildArgs { + pub(crate) base: OpFlashblockPayloadBase, + pub(crate) transactions: I, + pub(crate) cached_state: Option<(B256, CachedReads)>, + pub(crate) last_flashblock_index: u64, + pub(crate) cancel: CancellationToken, +} + +/// The `FlashblocksValidator` builds [`PendingBlock`] out of a sequence of transactions. /// /// Owns a [`TransactionCache`] for incremental prefix caching between flashblock builds. #[derive(Debug)] -pub(crate) struct FlashBlockBuilder { +pub(crate) struct FlashblocksValidator { + /// The EVM configuration used to build the flashblocks. evm_config: EvmConfig, - provider: Provider, + /// The transaction execution cache for incremental executions. tx_cache: TransactionCache, + /// The state cache containing the canonical chainstate provider and the flashblocks + /// state cache layer. + state_cache: StateCache, } -impl FlashBlockBuilder { - pub(crate) fn new(evm_config: EvmConfig, provider: Provider) -> Self { - Self { evm_config, provider, tx_cache: TransactionCache::new() } +impl FlashblocksValidator { + pub(crate) fn new(evm_config: EvmConfig, state_cache: StateCache) -> Self { + Self { evm_config, state_cache, tx_cache: TransactionCache::new() } } pub(crate) const fn provider(&self) -> &Provider { @@ -61,56 +73,6 @@ impl FlashBlockBuilder - FlashBlockBuilder -{ - /// Clones the builder config and moves the transaction cache into the new - /// builder, leaving `self` with an empty cache. - /// - /// Used before spawning a blocking build task. - pub(crate) fn fork_with_cache(&mut self) -> Self { - Self { - evm_config: self.evm_config.clone(), - provider: self.provider.clone(), - tx_cache: std::mem::take(&mut self.tx_cache), - } - } - - /// Restores the transaction cache from a completed forked builder. - pub(crate) fn merge_cache(&mut self, other: Self) { - self.tx_cache = other.tx_cache; - } -} - -pub(crate) struct BuildArgs { - pub(crate) base: OpFlashblockPayloadBase, - pub(crate) transactions: I, - pub(crate) cached_state: Option<(B256, CachedReads)>, - pub(crate) last_flashblock_index: u64, - pub(crate) last_flashblock_hash: B256, - pub(crate) compute_state_root: bool, - /// Optional pending parent state for speculative building. - /// When set, allows building on top of a pending block that hasn't been - /// canonicalized yet. - pub(crate) pending_parent: Option>, -} - -/// Result of a flashblock build operation. -#[derive(Debug)] -pub(crate) struct BuildResult { - /// The built pending flashblock. - pub(crate) pending_flashblock: PendingFlashBlock, - /// Cached reads from this build. - pub(crate) cached_reads: CachedReads, - /// Pending state that can be used for building subsequent blocks. - pub(crate) pending_state: PendingBlockState, } /// Cached prefix execution data used to resume canonical builds. @@ -160,12 +122,10 @@ where Receipt = ReceiptTy, > + Unpin, { - /// Returns the [`PendingFlashBlock`] made purely out of transactions and - /// [`OpFlashblockPayloadBase`] in `args`. + /// Returns the [`PendingSequence`], which contains the full built execution state of + /// the flashblocks sequence passed in `BuildArgs`. /// - /// This method supports two building modes: - /// 1. **Canonical mode**: Parent matches local tip - uses state from storage - /// 2. **Speculative mode**: Parent is a pending block - uses pending state + /// The /// /// In canonical mode, the internal transaction cache is used to resume from /// cached state if the transaction list is a continuation of what was previously @@ -176,10 +136,14 @@ where /// - In speculative mode: no pending parent state provided pub(crate) fn execute>>>( &mut self, - mut args: BuildArgs, - ) -> eyre::Result>> { + mut args: BuildArgs, + ) -> eyre::Result> { trace!(target: "flashblocks", "Attempting new pending block from flashblocks"); + let parent_hash = args.base.parent_hash; + let parent_header = self.state_cache.latest_header(parent_hash)?; + let state_provider = self.state_cache.history_by_block_hash(parent_header.hash())?; + let latest = self .provider .latest_header()? diff --git a/crates/flashblocks/src/test_utils.rs b/crates/flashblocks/src/test_utils.rs index deea2cf5..e1f2bfd9 100644 --- a/crates/flashblocks/src/test_utils.rs +++ b/crates/flashblocks/src/test_utils.rs @@ -48,11 +48,11 @@ //! .build(); //! ``` -use crate::FlashBlock; use alloy_primitives::{Address, Bloom, Bytes, B256, U256}; use alloy_rpc_types_engine::PayloadId; use op_alloy_rpc_types_engine::{ - OpFlashblockPayloadBase, OpFlashblockPayloadDelta, OpFlashblockPayloadMetadata, + OpFlashblockPayload, OpFlashblockPayloadBase, OpFlashblockPayloadDelta, + OpFlashblockPayloadMetadata, }; /// Factory for creating test flashblocks with automatic timestamp management. @@ -119,7 +119,7 @@ impl TestFlashBlockFactory { /// let fb1 = factory.flashblock_after(&fb0).build(); // Simple /// let fb2 = factory.flashblock_after(&fb1).transactions(txs).build(); // With txs /// ``` - pub(crate) fn flashblock_after(&self, previous: &FlashBlock) -> TestFlashBlockBuilder { + pub(crate) fn flashblock_after(&self, previous: &OpFlashblockPayload) -> TestFlashBlockBuilder { let parent_hash = previous.base.as_ref().map(|b| b.parent_hash).unwrap_or(previous.diff.block_hash); @@ -145,7 +145,10 @@ impl TestFlashBlockFactory { /// let fb1 = factory.flashblock_for_next_block(&fb0).build(); // Block 101, timestamp 1000002 /// let fb2 = factory.flashblock_for_next_block(&fb1).transactions(txs).build(); // Customize /// ``` - pub(crate) fn flashblock_for_next_block(&self, previous: &FlashBlock) -> TestFlashBlockBuilder { + pub(crate) fn flashblock_for_next_block( + &self, + previous: &OpFlashblockPayload, + ) -> TestFlashBlockBuilder { let prev_timestamp = previous.base.as_ref().map(|b| b.timestamp).unwrap_or(self.base_timestamp); @@ -290,7 +293,7 @@ impl TestFlashBlockBuilder { /// Builds the flashblock. /// /// If index is 0 and no base was explicitly set, creates a default base. - pub(crate) fn build(mut self) -> FlashBlock { + pub(crate) fn build(mut self) -> OpFlashblockPayload { // Auto-create base for index 0 if not set if self.index == 0 && self.base.is_none() { self.base = Some(OpFlashblockPayloadBase { @@ -306,7 +309,7 @@ impl TestFlashBlockBuilder { }); } - FlashBlock { + OpFlashblockPayload { index: self.index, payload_id: self.payload_id, base: self.base, diff --git a/crates/flashblocks/src/ws/decoding.rs b/crates/flashblocks/src/ws/decoding.rs index 64d96dc5..a09ef329 100644 --- a/crates/flashblocks/src/ws/decoding.rs +++ b/crates/flashblocks/src/ws/decoding.rs @@ -1,24 +1,25 @@ -use crate::FlashBlock; -use alloy_primitives::bytes::Bytes; use std::io; +use alloy_primitives::bytes::Bytes; +use op_alloy_rpc_types_engine::OpFlashblockPayload; + /// A trait for decoding flashblocks from bytes. pub trait FlashBlockDecoder: Send + 'static { - /// Decodes `bytes` into a [`FlashBlock`]. - fn decode(&self, bytes: Bytes) -> eyre::Result; + /// Decodes `bytes` into a [`OpFlashblockPayload`]. + fn decode(&self, bytes: Bytes) -> eyre::Result; } /// Default implementation of the decoder. impl FlashBlockDecoder for () { - fn decode(&self, bytes: Bytes) -> eyre::Result { + fn decode(&self, bytes: Bytes) -> eyre::Result { decode_flashblock(bytes) } } -pub(crate) fn decode_flashblock(bytes: Bytes) -> eyre::Result { +pub(crate) fn decode_flashblock(bytes: Bytes) -> eyre::Result { let bytes = crate::ws::decoding::try_parse_message(bytes)?; - let payload: FlashBlock = + let payload: OpFlashblockPayload = serde_json::from_slice(&bytes).map_err(|e| eyre::eyre!("failed to parse message: {e}"))?; Ok(payload) diff --git a/crates/flashblocks/src/ws/stream.rs b/crates/flashblocks/src/ws/stream.rs index 18726219..39666833 100644 --- a/crates/flashblocks/src/ws/stream.rs +++ b/crates/flashblocks/src/ws/stream.rs @@ -1,4 +1,4 @@ -use crate::{ws::FlashBlockDecoder, FlashBlock}; +use crate::ws::FlashBlockDecoder; use futures_util::{ stream::{SplitSink, SplitStream}, FutureExt, Sink, Stream, StreamExt, @@ -18,7 +18,9 @@ use tokio_tungstenite::{ use tracing::debug; use url::Url; -/// An asynchronous stream of [`FlashBlock`] from a websocket connection. +use op_alloy_rpc_types_engine::OpFlashblockPayload; + +/// An asynchronous stream of [`OpFlashblockPayload`] from a websocket connection. /// /// The stream attempts to connect to a websocket URL and then decode each received item. /// @@ -48,7 +50,7 @@ impl WsFlashBlockStream { } } - /// Sets the [`FlashBlock`] decoder for the websocket stream. + /// Sets the [`OpFlashblockPayload`] decoder for the websocket stream. pub fn with_decoder(self, decoder: Box) -> Self { Self { decoder, ..self } } @@ -75,7 +77,7 @@ where S: Sink + Send + Unpin, C: WsConnect + Clone + Send + 'static + Unpin, { - type Item = eyre::Result; + type Item = eyre::Result; fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { let this = self.get_mut(); @@ -424,22 +426,22 @@ mod tests { fn to_json_message, F: Fn(B) -> Message>( wrapper_f: F, - ) -> impl Fn(&FlashBlock) -> Result + use { + ) -> impl Fn(&OpFlashblockPayload) -> Result + use { move |block| to_json_message_using(block, &wrapper_f) } - fn to_json_binary_message(block: &FlashBlock) -> Result { + fn to_json_binary_message(block: &OpFlashblockPayload) -> Result { to_json_message_using(block, Message::Binary) } fn to_json_message_using, F: Fn(B) -> Message>( - block: &FlashBlock, + block: &OpFlashblockPayload, wrapper_f: F, ) -> Result { Ok(wrapper_f(B::try_from(Bytes::from(serde_json::to_vec(block).unwrap())).unwrap())) } - fn to_brotli_message(block: &FlashBlock) -> Result { + fn to_brotli_message(block: &OpFlashblockPayload) -> Result { let json = serde_json::to_vec(block).unwrap(); let mut compressed = Vec::new(); brotli::BrotliCompress( @@ -451,7 +453,7 @@ mod tests { Ok(Message::Binary(Bytes::from(compressed))) } - fn flashblock() -> FlashBlock { + fn flashblock() -> OpFlashblockPayload { Default::default() } @@ -460,7 +462,7 @@ mod tests { #[test_case::test_case(to_brotli_message; "brotli")] #[tokio::test] async fn test_stream_decodes_messages_successfully( - to_message: impl Fn(&FlashBlock) -> Result, + to_message: impl Fn(&OpFlashblockPayload) -> Result, ) { let flashblocks = [flashblock()]; let connector = FakeConnector::from(flashblocks.iter().map(to_message)); From 2cb1ea3c3b2312ddb832a12fb9949c732cecb33f Mon Sep 17 00:00:00 2001 From: Niven Date: Tue, 10 Mar 2026 18:08:32 +0800 Subject: [PATCH 07/76] fix(flashblocks-rpc): fix range bounds trait impl operations MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude Sonnet 4.6 --- crates/flashblocks/src/cache/state/block.rs | 21 ++++++++-- crates/flashblocks/src/cache/state/header.rs | 38 +++-------------- crates/flashblocks/src/cache/state/id.rs | 41 +++++-------------- crates/flashblocks/src/cache/state/mod.rs | 41 +++++++++++++++++++ crates/flashblocks/src/cache/state/receipt.rs | 7 +++- .../src/cache/state/transaction.rs | 11 ++++- 6 files changed, 92 insertions(+), 67 deletions(-) diff --git a/crates/flashblocks/src/cache/state/block.rs b/crates/flashblocks/src/cache/state/block.rs index ba0f1447..5a8d81d4 100644 --- a/crates/flashblocks/src/cache/state/block.rs +++ b/crates/flashblocks/src/cache/state/block.rs @@ -94,21 +94,36 @@ impl> BlockReader for StateCa } fn block_range(&self, range: RangeInclusive) -> ProviderResult> { - self.provider.block_range(range) + self.collect_cached_block_range( + *range.start(), + *range.end(), + |bar| block_from_bar(bar), + |r| self.provider.block_range(r), + ) } fn block_with_senders_range( &self, range: RangeInclusive, ) -> ProviderResult>> { - self.provider.block_with_senders_range(range) + self.collect_cached_block_range( + *range.start(), + *range.end(), + |bar| (*bar.block).clone(), + |r| self.provider.block_with_senders_range(r), + ) } fn recovered_block_range( &self, range: RangeInclusive, ) -> ProviderResult>> { - self.provider.recovered_block_range(range) + self.collect_cached_block_range( + *range.start(), + *range.end(), + |bar| (*bar.block).clone(), + |r| self.provider.recovered_block_range(r), + ) } fn block_by_transaction_id(&self, id: TxNumber) -> ProviderResult> { diff --git a/crates/flashblocks/src/cache/state/header.rs b/crates/flashblocks/src/cache/state/header.rs index 2ff52b0e..003cf337 100644 --- a/crates/flashblocks/src/cache/state/header.rs +++ b/crates/flashblocks/src/cache/state/header.rs @@ -34,38 +34,12 @@ impl> HeaderProvider if start > end { return Ok(Vec::new()); } - - let inner = self.inner.read(); - - // Walk backwards from the end of the inclusive range, collecting - // consecutive cache hits from the confirm cache tail. - let mut cache_headers = Vec::new(); - let mut provider_end = end; - let mut index = end; - loop { - if let Some(bar) = inner.confirm_cache.get_block_by_number(index) { - cache_headers.push(bar.block.header().clone()); - provider_end = index.saturating_sub(1); - } else { - break; - } - if index == start { - break; - } - index -= 1; - } - cache_headers.reverse(); - drop(inner); - - // Delegate the remaining prefix [start..=provider_end] to the provider. - let mut result = - if provider_end >= start && cache_headers.len() < (end - start + 1) as usize { - self.provider.headers_range(start..=provider_end)? - } else { - Vec::new() - }; - result.extend(cache_headers); - Ok(result) + self.collect_cached_block_range( + start, + end, + |bar| bar.block.header().clone(), + |r| self.provider.headers_range(r), + ) } fn sealed_header( diff --git a/crates/flashblocks/src/cache/state/id.rs b/crates/flashblocks/src/cache/state/id.rs index e647959c..efba6c66 100644 --- a/crates/flashblocks/src/cache/state/id.rs +++ b/crates/flashblocks/src/cache/state/id.rs @@ -29,36 +29,17 @@ impl> BlockHashReader // Aligns with underlying blockchain provider return Ok(Vec::new()); } - - let inner = self.inner.read(); - let mut cache_hashes = Vec::new(); - let mut provider_end = end; - let mut index = end - 1; - loop { - if let Some(hash) = inner.confirm_cache.hash_for_number(index) { - cache_hashes.push(hash); - provider_end = index; - } else { - break; - } - // Guard against underflow when index == 0, and stop once we've - // covered the full range down to `start` - if index == start { - break; - } - index -= 1; - } - cache_hashes.reverse(); - drop(inner); - - // Delegate the remaining prefix [start, provider_end) to the provider - let mut result = if provider_end > start { - self.provider.canonical_hashes_range(start, provider_end)? - } else { - Vec::new() - }; - result.extend(cache_hashes); - Ok(result) + // Provider uses half-open [start, end), convert to inclusive for the helper + self.collect_cached_block_range( + start, + end - 1, + |bar| bar.block.hash(), + |r| { + // Convert back to half-open [start, end) for the provider + let end_exclusive = r.end().saturating_add(1); + self.provider.canonical_hashes_range(*r.start(), end_exclusive) + }, + ) } } diff --git a/crates/flashblocks/src/cache/state/mod.rs b/crates/flashblocks/src/cache/state/mod.rs index a2544a8d..a420c5b9 100644 --- a/crates/flashblocks/src/cache/state/mod.rs +++ b/crates/flashblocks/src/cache/state/mod.rs @@ -92,6 +92,47 @@ impl> StateCache self.inner.read().pending.as_ref().map(|p| p.pending.block().number()) } + /// Collects items from an inclusive block number range `[start..=end]`, using + /// the confirm cache as an overlay on top of the provider. + /// + /// Walks backward from `end`, collecting consecutive cache hits via `from_cache`. + /// Delegates the remaining prefix `[start..=provider_end]` to `from_provider`. + fn collect_cached_block_range( + &self, + start: BlockNumber, + end: BlockNumber, + from_cache: impl Fn(&BlockAndReceipts) -> T, + from_provider: impl FnOnce(core::ops::RangeInclusive) -> ProviderResult>, + ) -> ProviderResult> { + let inner = self.inner.read(); + let mut cache_items = Vec::new(); + let mut provider_end = end; + let mut index = end; + loop { + if let Some(bar) = inner.confirm_cache.get_block_by_number(index) { + cache_items.push(from_cache(&bar)); + provider_end = index.saturating_sub(1); + } else { + break; + } + if index == start { + break; + } + index -= 1; + } + cache_items.reverse(); + drop(inner); + + let mut result = if provider_end >= start && cache_items.len() < (end - start + 1) as usize + { + from_provider(start..=provider_end)? + } else { + Vec::new() + }; + result.extend(cache_items); + Ok(result) + } + /// Resolves an `impl RangeBounds` into an inclusive `(start, end)` pair. /// Matches reth's blockchain provider's convert_range_bounds semantics, and unbounded /// ends are resolved to `best_block_number`. diff --git a/crates/flashblocks/src/cache/state/receipt.rs b/crates/flashblocks/src/cache/state/receipt.rs index c81c6aaf..4d0ed98a 100644 --- a/crates/flashblocks/src/cache/state/receipt.rs +++ b/crates/flashblocks/src/cache/state/receipt.rs @@ -48,7 +48,12 @@ impl> ReceiptProvider &self, block_range: RangeInclusive, ) -> ProviderResult>> { - self.provider.receipts_by_block_range(block_range) + self.collect_cached_block_range( + *block_range.start(), + *block_range.end(), + |bar| (*bar.receipts).clone(), + |r| self.provider.receipts_by_block_range(r), + ) } } diff --git a/crates/flashblocks/src/cache/state/transaction.rs b/crates/flashblocks/src/cache/state/transaction.rs index f2f9825a..c4c8d3f5 100644 --- a/crates/flashblocks/src/cache/state/transaction.rs +++ b/crates/flashblocks/src/cache/state/transaction.rs @@ -59,7 +59,16 @@ impl> TransactionsProvider &self, range: impl RangeBounds, ) -> ProviderResult>> { - self.provider.transactions_by_block_range(range) + let (start, end) = self.resolve_range_bounds(range)?; + if start > end { + return Ok(Vec::new()); + } + self.collect_cached_block_range( + start, + end, + |bar| bar.block.body().transactions().to_vec(), + |r| self.provider.transactions_by_block_range(r), + ) } fn transactions_by_tx_range( From 59164fd3dc139e428e838f628621f19b6fb1600f Mon Sep 17 00:00:00 2001 From: Niven Date: Wed, 11 Mar 2026 11:37:32 +0800 Subject: [PATCH 08/76] chore(flashblocks-rpc): refactor state mod MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude Sonnet 4.6 --- .../src/cache/{state => }/block.rs | 13 +++--- .../src/cache/{state => }/factory.rs | 4 +- .../src/cache/{state => }/header.rs | 4 +- .../flashblocks/src/cache/{state => }/id.rs | 10 +++-- crates/flashblocks/src/cache/mod.rs | 17 +++++-- .../src/cache/{state => }/receipt.rs | 6 +-- .../src/cache/{state/mod.rs => state.rs} | 44 ++++++++----------- .../src/cache/{state => }/transaction.rs | 4 +- .../src/cache/{state => }/utils.rs | 2 +- crates/flashblocks/src/execution/worker.rs | 9 ++-- 10 files changed, 60 insertions(+), 53 deletions(-) rename crates/flashblocks/src/cache/{state => }/block.rs (96%) rename crates/flashblocks/src/cache/{state => }/factory.rs (96%) rename crates/flashblocks/src/cache/{state => }/header.rs (95%) rename crates/flashblocks/src/cache/{state => }/id.rs (94%) rename crates/flashblocks/src/cache/{state => }/receipt.rs (92%) rename crates/flashblocks/src/cache/{state/mod.rs => state.rs} (89%) rename crates/flashblocks/src/cache/{state => }/transaction.rs (96%) rename crates/flashblocks/src/cache/{state => }/utils.rs (91%) diff --git a/crates/flashblocks/src/cache/state/block.rs b/crates/flashblocks/src/cache/block.rs similarity index 96% rename from crates/flashblocks/src/cache/state/block.rs rename to crates/flashblocks/src/cache/block.rs index 5a8d81d4..2694fa93 100644 --- a/crates/flashblocks/src/cache/state/block.rs +++ b/crates/flashblocks/src/cache/block.rs @@ -1,7 +1,4 @@ -use super::{ - utils::{block_from_bar, StateCacheProvider}, - StateCache, -}; +use crate::cache::{block_from_bar, FlashblockStateCache, StateCacheProvider}; use alloy_eips::{BlockHashOrNumber, BlockId}; use alloy_primitives::{BlockNumber, TxNumber, B256}; @@ -13,7 +10,9 @@ use reth_storage_api::{ BlockSource, HeaderProvider, TransactionVariant, }; -impl> BlockReader for StateCache { +impl> BlockReader + for FlashblockStateCache +{ type Block = BlockTy; fn find_block_by_hash( @@ -132,7 +131,7 @@ impl> BlockReader for StateCa } impl> BlockReaderIdExt - for StateCache + for FlashblockStateCache { fn block_by_id(&self, id: BlockId) -> ProviderResult> { match id { @@ -160,7 +159,7 @@ impl> BlockReaderIdExt } impl> BlockBodyIndicesProvider - for StateCache + for FlashblockStateCache { fn block_body_indices(&self, num: u64) -> ProviderResult> { self.provider.block_body_indices(num) diff --git a/crates/flashblocks/src/cache/state/factory.rs b/crates/flashblocks/src/cache/factory.rs similarity index 96% rename from crates/flashblocks/src/cache/state/factory.rs rename to crates/flashblocks/src/cache/factory.rs index 217a0641..008c7311 100644 --- a/crates/flashblocks/src/cache/state/factory.rs +++ b/crates/flashblocks/src/cache/factory.rs @@ -1,4 +1,4 @@ -use super::{utils::StateCacheProvider, StateCache}; +use crate::cache::{FlashblockStateCache, StateCacheProvider}; use alloy_eips::BlockNumberOrTag; use alloy_primitives::{BlockNumber, B256}; @@ -6,7 +6,7 @@ use reth_primitives_traits::NodePrimitives; use reth_storage_api::{errors::provider::ProviderResult, StateProviderBox, StateProviderFactory}; impl> StateProviderFactory - for StateCache + for FlashblockStateCache { fn latest(&self) -> ProviderResult { // Determine effective latest: if confirm cache is strictly ahead of the diff --git a/crates/flashblocks/src/cache/state/header.rs b/crates/flashblocks/src/cache/header.rs similarity index 95% rename from crates/flashblocks/src/cache/state/header.rs rename to crates/flashblocks/src/cache/header.rs index 003cf337..88f1e5e7 100644 --- a/crates/flashblocks/src/cache/state/header.rs +++ b/crates/flashblocks/src/cache/header.rs @@ -1,4 +1,4 @@ -use super::{utils::StateCacheProvider, StateCache}; +use crate::cache::{FlashblockStateCache, StateCacheProvider}; use alloy_primitives::{BlockNumber, B256}; use core::ops::RangeBounds; @@ -6,7 +6,7 @@ use reth_primitives_traits::{HeaderTy, NodePrimitives, SealedHeader}; use reth_storage_api::{errors::provider::ProviderResult, HeaderProvider}; impl> HeaderProvider - for StateCache + for FlashblockStateCache { type Header = HeaderTy; diff --git a/crates/flashblocks/src/cache/state/id.rs b/crates/flashblocks/src/cache/id.rs similarity index 94% rename from crates/flashblocks/src/cache/state/id.rs rename to crates/flashblocks/src/cache/id.rs index efba6c66..e76a13c3 100644 --- a/crates/flashblocks/src/cache/state/id.rs +++ b/crates/flashblocks/src/cache/id.rs @@ -1,4 +1,4 @@ -use super::{utils::StateCacheProvider, StateCache}; +use crate::cache::{FlashblockStateCache, StateCacheProvider}; use alloy_consensus::BlockHeader; use alloy_eips::BlockNumHash; @@ -10,7 +10,7 @@ use reth_storage_api::{ }; impl> BlockHashReader - for StateCache + for FlashblockStateCache { fn block_hash(&self, number: BlockNumber) -> ProviderResult> { if let Some(hash) = self.inner.read().confirm_cache.hash_for_number(number) { @@ -44,7 +44,7 @@ impl> BlockHashReader } impl> BlockNumReader - for StateCache + for FlashblockStateCache { fn chain_info(&self) -> ProviderResult { let mut info = self.provider.chain_info()?; @@ -78,7 +78,9 @@ impl> BlockNumReader } } -impl> BlockIdReader for StateCache { +impl> BlockIdReader + for FlashblockStateCache +{ fn pending_block_num_hash(&self) -> ProviderResult> { let inner = self.inner.read(); if let Some(pending) = &inner.pending { diff --git a/crates/flashblocks/src/cache/mod.rs b/crates/flashblocks/src/cache/mod.rs index 8c68853e..5167acea 100644 --- a/crates/flashblocks/src/cache/mod.rs +++ b/crates/flashblocks/src/cache/mod.rs @@ -1,9 +1,18 @@ -pub(crate) mod confirm; -pub(crate) mod pending; +mod block; +mod confirm; +mod factory; +mod header; +mod id; +mod pending; pub(crate) mod raw; -pub(crate) mod state; +mod receipt; +mod state; +mod transaction; +mod utils; pub(crate) use confirm::ConfirmCache; pub(crate) use pending::PendingSequence; pub(crate) use raw::RawFlashblocksCache; -pub use state::StateCache; +pub(crate) use utils::{block_from_bar, StateCacheProvider}; + +pub use state::FlashblockStateCache; diff --git a/crates/flashblocks/src/cache/state/receipt.rs b/crates/flashblocks/src/cache/receipt.rs similarity index 92% rename from crates/flashblocks/src/cache/state/receipt.rs rename to crates/flashblocks/src/cache/receipt.rs index 4d0ed98a..955c3aa7 100644 --- a/crates/flashblocks/src/cache/state/receipt.rs +++ b/crates/flashblocks/src/cache/receipt.rs @@ -1,4 +1,4 @@ -use super::{utils::StateCacheProvider, StateCache}; +use crate::cache::{FlashblockStateCache, StateCacheProvider}; use alloy_eips::BlockHashOrNumber; use alloy_primitives::{BlockNumber, TxHash, TxNumber}; @@ -7,7 +7,7 @@ use reth_primitives_traits::{NodePrimitives, ReceiptTy}; use reth_storage_api::{errors::provider::ProviderResult, ReceiptProvider, ReceiptProviderIdExt}; impl> ReceiptProvider - for StateCache + for FlashblockStateCache { type Receipt = ReceiptTy; @@ -58,6 +58,6 @@ impl> ReceiptProvider } impl> ReceiptProviderIdExt - for StateCache + for FlashblockStateCache { } diff --git a/crates/flashblocks/src/cache/state/mod.rs b/crates/flashblocks/src/cache/state.rs similarity index 89% rename from crates/flashblocks/src/cache/state/mod.rs rename to crates/flashblocks/src/cache/state.rs index a420c5b9..f173665c 100644 --- a/crates/flashblocks/src/cache/state/mod.rs +++ b/crates/flashblocks/src/cache/state.rs @@ -1,19 +1,10 @@ -mod block; -mod factory; -mod header; -mod id; -mod receipt; -mod transaction; -pub(crate) mod utils; - -use crate::cache::{ConfirmCache, PendingSequence}; +use crate::cache::{ConfirmCache, PendingSequence, StateCacheProvider}; +use core::ops::RangeBounds; use parking_lot::RwLock; use std::sync::Arc; -use utils::StateCacheProvider; use alloy_consensus::BlockHeader; use alloy_primitives::{BlockNumber, B256}; -use core::ops::RangeBounds; use reth_primitives_traits::NodePrimitives; use reth_rpc_eth_types::block::BlockAndReceipts; use reth_storage_api::{errors::provider::ProviderResult, BlockNumReader}; @@ -44,16 +35,19 @@ use reth_storage_api::{errors::provider::ProviderResult, BlockNumReader}; /// state, ensuring atomic operations across pending, confirmed, and height /// state (e.g. reorg detection + flush + insert in `handle_confirmed_block`). #[derive(Debug, Clone)] -pub struct StateCache { - inner: Arc>>, - provider: Provider, +pub struct FlashblockStateCache { + pub(super) inner: Arc>>, + pub(super) provider: Provider, } -impl> StateCache { - /// Creates a new [`StateCache`]. +impl> FlashblockStateCache { + /// Creates a new [`FlashblockStateCache`]. pub fn new(provider: Provider) -> eyre::Result { let canon_height = provider.best_block_number()?; - Ok(Self { inner: Arc::new(RwLock::new(StateCacheInner::new(canon_height))), provider }) + Ok(Self { + inner: Arc::new(RwLock::new(FlashblockStateCacheInner::new(canon_height))), + provider, + }) } /// Returns a reference to the underlying chainstate provider. @@ -97,7 +91,7 @@ impl> StateCache /// /// Walks backward from `end`, collecting consecutive cache hits via `from_cache`. /// Delegates the remaining prefix `[start..=provider_end]` to `from_provider`. - fn collect_cached_block_range( + pub(super) fn collect_cached_block_range( &self, start: BlockNumber, end: BlockNumber, @@ -136,7 +130,7 @@ impl> StateCache /// Resolves an `impl RangeBounds` into an inclusive `(start, end)` pair. /// Matches reth's blockchain provider's convert_range_bounds semantics, and unbounded /// ends are resolved to `best_block_number`. - fn resolve_range_bounds( + pub(super) fn resolve_range_bounds( &self, range: impl RangeBounds, ) -> ProviderResult<(BlockNumber, BlockNumber)> { @@ -156,18 +150,18 @@ impl> StateCache /// Inner state of the flashblocks state cache. #[derive(Debug)] -struct StateCacheInner { +pub(super) struct FlashblockStateCacheInner { /// The current in-progress pending flashblock sequence, if any. - pending: Option>, + pub(super) pending: Option>, /// Cache of confirmed flashblock sequences ahead of the canonical chain. - confirm_cache: ConfirmCache, + pub(super) confirm_cache: ConfirmCache, /// The highest confirmed block height. - confirm_height: Option, + pub(super) confirm_height: Option, /// The highest canonical block height. - canon_height: u64, + pub(super) canon_height: u64, } -impl StateCacheInner { +impl FlashblockStateCacheInner { fn new(canon_height: u64) -> Self { Self { pending: None, diff --git a/crates/flashblocks/src/cache/state/transaction.rs b/crates/flashblocks/src/cache/transaction.rs similarity index 96% rename from crates/flashblocks/src/cache/state/transaction.rs rename to crates/flashblocks/src/cache/transaction.rs index c4c8d3f5..c274c8d7 100644 --- a/crates/flashblocks/src/cache/state/transaction.rs +++ b/crates/flashblocks/src/cache/transaction.rs @@ -1,4 +1,4 @@ -use super::{utils::StateCacheProvider, StateCache}; +use crate::cache::{FlashblockStateCache, StateCacheProvider}; use alloy_eips::BlockHashOrNumber; use alloy_primitives::{Address, BlockNumber, TxHash, TxNumber}; @@ -7,7 +7,7 @@ use reth_primitives_traits::{BlockBody, NodePrimitives, TransactionMeta}; use reth_storage_api::{errors::provider::ProviderResult, TransactionsProvider}; impl> TransactionsProvider - for StateCache + for FlashblockStateCache { type Transaction = N::SignedTx; diff --git a/crates/flashblocks/src/cache/state/utils.rs b/crates/flashblocks/src/cache/utils.rs similarity index 91% rename from crates/flashblocks/src/cache/state/utils.rs rename to crates/flashblocks/src/cache/utils.rs index d900269d..aa0eefcb 100644 --- a/crates/flashblocks/src/cache/state/utils.rs +++ b/crates/flashblocks/src/cache/utils.rs @@ -2,7 +2,7 @@ use reth_primitives_traits::{Block, BlockTy, HeaderTy, NodePrimitives, ReceiptTy use reth_rpc_eth_types::block::BlockAndReceipts; use reth_storage_api::{BlockReaderIdExt, StateProviderFactory}; -/// Provider trait bound alias used throughout the `StateCache` implementation. +/// Provider trait bound alias used throughout the `FlashblockStateCache` implementation. /// /// The provider must implement the full reth block reader + state provider stack. pub(crate) trait StateCacheProvider: diff --git a/crates/flashblocks/src/execution/worker.rs b/crates/flashblocks/src/execution/worker.rs index dc3aa394..4d0f9154 100644 --- a/crates/flashblocks/src/execution/worker.rs +++ b/crates/flashblocks/src/execution/worker.rs @@ -1,5 +1,5 @@ use crate::{ - cache::{PendingSequence, StateCache}, + cache::{FlashblockStateCache, PendingSequence}, execution::{CachedExecutionMeta, TransactionCache}, }; use std::{ @@ -57,11 +57,14 @@ pub(crate) struct FlashblocksValidator { tx_cache: TransactionCache, /// The state cache containing the canonical chainstate provider and the flashblocks /// state cache layer. - state_cache: StateCache, + state_cache: FlashblockStateCache, } impl FlashblocksValidator { - pub(crate) fn new(evm_config: EvmConfig, state_cache: StateCache) -> Self { + pub(crate) fn new( + evm_config: EvmConfig, + state_cache: FlashblockStateCache, + ) -> Self { Self { evm_config, state_cache, tx_cache: TransactionCache::new() } } From dd25891a048365328b17c5d198aba0a22a7dda27 Mon Sep 17 00:00:00 2001 From: Niven Date: Wed, 11 Mar 2026 12:07:53 +0800 Subject: [PATCH 09/76] feat(flashblocks-rpc): add transaction index to confirm cache Add a tertiary tx hash index (HashMap>) to ConfirmCache, enabling O(1) lookups for transaction_by_hash, transaction_by_hash_with_meta, and receipt_by_hash from confirmed flashblocks that are ahead of the canonical chain. Co-Authored-By: Claude Opus 4.6 --- crates/flashblocks/src/cache/confirm.rs | 96 +++++++++++++++++---- crates/flashblocks/src/cache/receipt.rs | 3 + crates/flashblocks/src/cache/transaction.rs | 27 ++++++ 3 files changed, 108 insertions(+), 18 deletions(-) diff --git a/crates/flashblocks/src/cache/confirm.rs b/crates/flashblocks/src/cache/confirm.rs index 293aaff3..4bbd9c7d 100644 --- a/crates/flashblocks/src/cache/confirm.rs +++ b/crates/flashblocks/src/cache/confirm.rs @@ -1,19 +1,42 @@ -use std::collections::{BTreeMap, HashMap}; +use std::{ + collections::{BTreeMap, HashMap}, + sync::Arc, +}; -use alloy_primitives::B256; +use alloy_consensus::transaction::TxHashRef; +use alloy_primitives::{TxHash, B256}; use eyre::eyre; -use reth_primitives_traits::NodePrimitives; +use reth_primitives_traits::{BlockBody, NodePrimitives, ReceiptTy}; use reth_rpc_eth_types::block::BlockAndReceipts; -const DEFAULT_CONFIRM_CACHE_SIZE: usize = 5_000; +const DEFAULT_CONFIRM_CACHE_SIZE: usize = 1_000; + +/// Cached transaction info (block context, receipt and tx data) for O(1) lookups +/// by transaction hash. +#[derive(Debug, Clone)] +pub struct CachedTxInfo { + /// Block number containing the transaction. + pub block_number: u64, + /// Block hash containing the transaction. + pub block_hash: B256, + /// Index of the transaction within the block. + pub tx_index: u64, + /// The signed transaction. + pub tx: N::SignedTx, + /// The corresponding receipt. + pub receipt: ReceiptTy, +} /// Confirmed flashblocks sequence cache that is ahead of the current node's canonical /// chainstate. We optimistically commit confirmed flashblocks sequences to the cache /// and flush them when the canonical chainstate catches up. /// /// Block data is stored in a `BTreeMap` keyed by block number, enabling O(log n) -/// range splits in [`flush_up_to`](Self::flush_up_to). A secondary `HashMap` -/// provides O(1) block hash to block number reverse lookups. +/// range splits in [`flush_up_to`](Self::flush_up_to). +/// A secondary `HashMap` provides O(1) block hash to block number reverse lookups. +/// +/// Transaction data is stored in a `HashMap` which indexes transaction hashes to +/// [`CachedTxInfo`] for O(1) tx/receipt lookups. #[derive(Debug)] pub struct ConfirmCache { /// Primary storage: block number → (block hash, block + receipts). @@ -21,6 +44,8 @@ pub struct ConfirmCache { blocks: BTreeMap)>, /// Reverse index: block hash → block number for O(1) hash-based lookups. hash_to_number: HashMap, + /// Transaction index: tx hash → cached tx info for O(1) tx/receipt lookups. + tx_index: HashMap>>, } impl Default for ConfirmCache { @@ -32,7 +57,7 @@ impl Default for ConfirmCache { impl ConfirmCache { /// Creates a new [`ConfirmCache`]. pub fn new() -> Self { - Self { blocks: BTreeMap::new(), hash_to_number: HashMap::new() } + Self { blocks: BTreeMap::new(), hash_to_number: HashMap::new(), tx_index: HashMap::new() } } /// Returns the number of cached entries. @@ -64,11 +89,35 @@ impl ConfirmCache { "confirm cache at max capacity ({DEFAULT_CONFIRM_CACHE_SIZE}), cannot insert block: {height}" )); } + + // Build tx index entries for all transactions in this block + let txs = block.block.body().transactions(); + let receipts = block.receipts.as_ref(); + for (idx, (tx, receipt)) in txs.iter().zip(receipts.iter()).enumerate() { + let tx_hash = *tx.tx_hash(); + let info = Arc::new(CachedTxInfo { + block_number: height, + block_hash: hash, + tx_index: idx as u64, + tx: tx.clone(), + receipt: receipt.clone(), + }); + self.tx_index.insert(tx_hash, info); + } + + // Build block index entries for block data self.hash_to_number.insert(hash, height); self.blocks.insert(height, (hash, block)); Ok(()) } + /// Clears all entries. + pub fn clear(&mut self) { + self.tx_index.clear(); + self.blocks.clear(); + self.hash_to_number.clear(); + } + /// Returns the block number for the given block hash, if cached. pub fn number_for_hash(&self, block_hash: &B256) -> Option { self.hash_to_number.get(block_hash).copied() @@ -89,6 +138,11 @@ impl ConfirmCache { self.blocks.get(&block_number).map(|(_, block)| block.clone()) } + /// Returns the cached transaction info for the given tx hash, if present. + pub fn get_tx_info(&self, tx_hash: &TxHash) -> Option>> { + self.tx_index.get(tx_hash).cloned() + } + /// Returns `true` if the cache contains a block with the given hash. pub fn contains_hash(&self, block_hash: &B256) -> bool { self.hash_to_number.contains_key(block_hash) @@ -100,16 +154,26 @@ impl ConfirmCache { } /// Removes and returns the confirmed block for the given block number. - pub fn remove_by_number(&mut self, block_number: u64) -> Option> { + pub fn remove_block_by_number(&mut self, block_number: u64) -> Option> { let (hash, block) = self.blocks.remove(&block_number)?; self.hash_to_number.remove(&hash); + self.remove_tx_index_for_block(&block); Some(block) } /// Removes and returns the confirmed block for the given block hash. - pub fn remove_by_hash(&mut self, block_hash: &B256) -> Option> { + pub fn remove_block_by_hash(&mut self, block_hash: &B256) -> Option> { let number = self.hash_to_number.remove(block_hash)?; - self.blocks.remove(&number).map(|(_, block)| block) + let (_, block) = self.blocks.remove(&number)?; + self.remove_tx_index_for_block(&block); + Some(block) + } + + /// Removes all tx index entries for the transactions in the given block. + fn remove_tx_index_for_block(&mut self, bar: &BlockAndReceipts) { + for tx in bar.block.body().transactions() { + self.tx_index.remove(&*tx.tx_hash()); + } } /// Flushes all entries with block number >= `from` (the reorged range). @@ -117,8 +181,9 @@ impl ConfirmCache { pub fn flush_from(&mut self, from: u64) -> usize { let reorged = self.blocks.split_off(&from); let count = reorged.len(); - for (hash, _) in reorged.into_values() { + for (hash, bar) in reorged.into_values() { self.hash_to_number.remove(&hash); + self.remove_tx_index_for_block(&bar); } count } @@ -132,15 +197,10 @@ impl ConfirmCache { let stale = std::mem::replace(&mut self.blocks, retained); let count = stale.len(); - for (hash, _) in stale.into_values() { + for (hash, bar) in stale.into_values() { self.hash_to_number.remove(&hash); + self.remove_tx_index_for_block(&bar); } count } - - /// Clears all entries. - pub fn clear(&mut self) { - self.blocks.clear(); - self.hash_to_number.clear(); - } } diff --git a/crates/flashblocks/src/cache/receipt.rs b/crates/flashblocks/src/cache/receipt.rs index 955c3aa7..6d182f0e 100644 --- a/crates/flashblocks/src/cache/receipt.rs +++ b/crates/flashblocks/src/cache/receipt.rs @@ -16,6 +16,9 @@ impl> ReceiptProvider } fn receipt_by_hash(&self, hash: TxHash) -> ProviderResult> { + if let Some(info) = self.inner.read().confirm_cache.get_tx_info(&hash) { + return Ok(Some(info.receipt.clone())); + } self.provider.receipt_by_hash(hash) } diff --git a/crates/flashblocks/src/cache/transaction.rs b/crates/flashblocks/src/cache/transaction.rs index c274c8d7..57861912 100644 --- a/crates/flashblocks/src/cache/transaction.rs +++ b/crates/flashblocks/src/cache/transaction.rs @@ -1,5 +1,6 @@ use crate::cache::{FlashblockStateCache, StateCacheProvider}; +use alloy_consensus::{transaction::TxHashRef, BlockHeader}; use alloy_eips::BlockHashOrNumber; use alloy_primitives::{Address, BlockNumber, TxHash, TxNumber}; use core::ops::RangeBounds; @@ -27,6 +28,9 @@ impl> TransactionsProvider } fn transaction_by_hash(&self, hash: TxHash) -> ProviderResult> { + if let Some(info) = self.inner.read().confirm_cache.get_tx_info(&hash) { + return Ok(Some(info.tx.clone())); + } self.provider.transaction_by_hash(hash) } @@ -34,6 +38,29 @@ impl> TransactionsProvider &self, hash: TxHash, ) -> ProviderResult> { + let inner = self.inner.read(); + if let Some(info) = inner.confirm_cache.get_tx_info(&hash) { + // Resolve block header fields from the confirm cache + let bar = inner.confirm_cache.get_block_by_number(info.block_number); + let (base_fee, excess_blob_gas, timestamp) = bar + .map(|b| { + let h = b.block.header(); + (h.base_fee_per_gas(), h.excess_blob_gas(), h.timestamp()) + }) + .unwrap_or_default(); + + let meta = TransactionMeta { + tx_hash: *info.tx.tx_hash(), + index: info.tx_index, + block_hash: info.block_hash, + block_number: info.block_number, + base_fee, + excess_blob_gas, + timestamp, + }; + return Ok(Some((info.tx.clone(), meta))); + } + drop(inner); self.provider.transaction_by_hash_with_meta(hash) } From d805b736f0c3105d9e92588f74c38642690db9e7 Mon Sep 17 00:00:00 2001 From: Niven Date: Wed, 11 Mar 2026 12:08:06 +0800 Subject: [PATCH 10/76] feat(flashblocks-rpc): add cache overlay for sealed_headers_while Add collect_cached_block_range_while helper that threads a predicate through both the provider prefix and cached suffix, enabling sealed_headers_while to serve confirmed flashblock headers instead of bypassing the cache entirely. Co-Authored-By: Claude Opus 4.6 --- crates/flashblocks/src/cache/header.rs | 14 +++++- crates/flashblocks/src/cache/state.rs | 67 ++++++++++++++++++++++++++ 2 files changed, 79 insertions(+), 2 deletions(-) diff --git a/crates/flashblocks/src/cache/header.rs b/crates/flashblocks/src/cache/header.rs index 88f1e5e7..41a4dbeb 100644 --- a/crates/flashblocks/src/cache/header.rs +++ b/crates/flashblocks/src/cache/header.rs @@ -56,8 +56,18 @@ impl> HeaderProvider fn sealed_headers_while( &self, range: impl RangeBounds, - predicate: impl FnMut(&SealedHeader) -> bool, + mut predicate: impl FnMut(&SealedHeader) -> bool, ) -> ProviderResult>> { - self.provider.sealed_headers_while(range, predicate) + let (start, end) = self.resolve_range_bounds(range)?; + if start > end { + return Ok(Vec::new()); + } + self.collect_cached_block_range_while( + start, + end, + |bar| bar.block.sealed_header().clone(), + |r, pred| self.provider.sealed_headers_while(r, pred), + &mut predicate, + ) } } diff --git a/crates/flashblocks/src/cache/state.rs b/crates/flashblocks/src/cache/state.rs index f173665c..d05a92fe 100644 --- a/crates/flashblocks/src/cache/state.rs +++ b/crates/flashblocks/src/cache/state.rs @@ -127,6 +127,73 @@ impl> FlashblockStateCache( + &self, + start: BlockNumber, + end: BlockNumber, + from_cache: impl Fn(&BlockAndReceipts) -> T, + from_provider: impl FnOnce( + core::ops::RangeInclusive, + &mut dyn FnMut(&T) -> bool, + ) -> ProviderResult>, + predicate: &mut dyn FnMut(&T) -> bool, + ) -> ProviderResult> { + let inner = self.inner.read(); + let mut cached_bars = Vec::new(); + let mut provider_end = end; + let mut index = end; + loop { + if let Some(bar) = inner.confirm_cache.get_block_by_number(index) { + cached_bars.push(bar); + provider_end = index.saturating_sub(1); + } else { + break; + } + if index == start { + break; + } + index -= 1; + } + cached_bars.reverse(); + drop(inner); + + // Delegate the provider prefix (if any) with the predicate + let has_provider_range = + provider_end >= start && cached_bars.len() < (end - start + 1) as usize; + let mut predicate_stopped = false; + let mut result = if has_provider_range { + let items = from_provider(start..=provider_end, predicate)?; + // If the provider returned fewer items than the full range, the predicate stopped + let expected = (provider_end - start + 1) as usize; + if items.len() < expected { + predicate_stopped = true; + } + items + } else { + Vec::new() + }; + + // Continue with cached items while predicate holds + if !predicate_stopped { + for bar in &cached_bars { + let item = from_cache(bar); + if !predicate(&item) { + break; + } + result.push(item); + } + } + + Ok(result) + } + /// Resolves an `impl RangeBounds` into an inclusive `(start, end)` pair. /// Matches reth's blockchain provider's convert_range_bounds semantics, and unbounded /// ends are resolved to `best_block_number`. From 26349627529cb9db1cc8432fee80e3700dd1b2ba Mon Sep 17 00:00:00 2001 From: Niven Date: Wed, 11 Mar 2026 12:22:22 +0800 Subject: [PATCH 11/76] fix(flashblocks-rpc): refactor range to add optional predicate MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude Sonnet 4.6 --- crates/flashblocks/src/cache/block.rs | 9 ++-- crates/flashblocks/src/cache/header.rs | 7 +-- crates/flashblocks/src/cache/id.rs | 3 +- crates/flashblocks/src/cache/receipt.rs | 3 +- crates/flashblocks/src/cache/state.rs | 56 ++++----------------- crates/flashblocks/src/cache/transaction.rs | 3 +- 6 files changed, 25 insertions(+), 56 deletions(-) diff --git a/crates/flashblocks/src/cache/block.rs b/crates/flashblocks/src/cache/block.rs index 2694fa93..6f7872cf 100644 --- a/crates/flashblocks/src/cache/block.rs +++ b/crates/flashblocks/src/cache/block.rs @@ -97,7 +97,8 @@ impl> BlockReader *range.start(), *range.end(), |bar| block_from_bar(bar), - |r| self.provider.block_range(r), + |r, _| self.provider.block_range(r), + None, ) } @@ -109,7 +110,8 @@ impl> BlockReader *range.start(), *range.end(), |bar| (*bar.block).clone(), - |r| self.provider.block_with_senders_range(r), + |r, _| self.provider.block_with_senders_range(r), + None, ) } @@ -121,7 +123,8 @@ impl> BlockReader *range.start(), *range.end(), |bar| (*bar.block).clone(), - |r| self.provider.recovered_block_range(r), + |r, _| self.provider.recovered_block_range(r), + None, ) } diff --git a/crates/flashblocks/src/cache/header.rs b/crates/flashblocks/src/cache/header.rs index 41a4dbeb..86193169 100644 --- a/crates/flashblocks/src/cache/header.rs +++ b/crates/flashblocks/src/cache/header.rs @@ -38,7 +38,8 @@ impl> HeaderProvider start, end, |bar| bar.block.header().clone(), - |r| self.provider.headers_range(r), + |r, _| self.provider.headers_range(r), + None, ) } @@ -62,12 +63,12 @@ impl> HeaderProvider if start > end { return Ok(Vec::new()); } - self.collect_cached_block_range_while( + self.collect_cached_block_range( start, end, |bar| bar.block.sealed_header().clone(), |r, pred| self.provider.sealed_headers_while(r, pred), - &mut predicate, + Some(&mut predicate), ) } } diff --git a/crates/flashblocks/src/cache/id.rs b/crates/flashblocks/src/cache/id.rs index e76a13c3..d2313e1c 100644 --- a/crates/flashblocks/src/cache/id.rs +++ b/crates/flashblocks/src/cache/id.rs @@ -34,11 +34,12 @@ impl> BlockHashReader start, end - 1, |bar| bar.block.hash(), - |r| { + |r, _| { // Convert back to half-open [start, end) for the provider let end_exclusive = r.end().saturating_add(1); self.provider.canonical_hashes_range(*r.start(), end_exclusive) }, + None, ) } } diff --git a/crates/flashblocks/src/cache/receipt.rs b/crates/flashblocks/src/cache/receipt.rs index 6d182f0e..ebfbe67d 100644 --- a/crates/flashblocks/src/cache/receipt.rs +++ b/crates/flashblocks/src/cache/receipt.rs @@ -55,7 +55,8 @@ impl> ReceiptProvider *block_range.start(), *block_range.end(), |bar| (*bar.receipts).clone(), - |r| self.provider.receipts_by_block_range(r), + |r, _| self.provider.receipts_by_block_range(r), + None, ) } } diff --git a/crates/flashblocks/src/cache/state.rs b/crates/flashblocks/src/cache/state.rs index d05a92fe..a8f59176 100644 --- a/crates/flashblocks/src/cache/state.rs +++ b/crates/flashblocks/src/cache/state.rs @@ -91,50 +91,11 @@ impl> FlashblockStateCache( - &self, - start: BlockNumber, - end: BlockNumber, - from_cache: impl Fn(&BlockAndReceipts) -> T, - from_provider: impl FnOnce(core::ops::RangeInclusive) -> ProviderResult>, - ) -> ProviderResult> { - let inner = self.inner.read(); - let mut cache_items = Vec::new(); - let mut provider_end = end; - let mut index = end; - loop { - if let Some(bar) = inner.confirm_cache.get_block_by_number(index) { - cache_items.push(from_cache(&bar)); - provider_end = index.saturating_sub(1); - } else { - break; - } - if index == start { - break; - } - index -= 1; - } - cache_items.reverse(); - drop(inner); - - let mut result = if provider_end >= start && cache_items.len() < (end - start + 1) as usize - { - from_provider(start..=provider_end)? - } else { - Vec::new() - }; - result.extend(cache_items); - Ok(result) - } - - /// Collects items from an inclusive block number range `[start..=end]` while - /// a predicate holds, using the confirm cache as an overlay on top of the provider. /// - /// Same overlay strategy as [`collect_cached_block_range`](Self::collect_cached_block_range): - /// walks backward from `end` to find the consecutive cache tail, delegates the - /// provider prefix with the predicate, then continues through cached items. - /// Stops as soon as the predicate returns `false`. - pub(super) fn collect_cached_block_range_while( + /// When `predicate` is `Some`, items are filtered: the provider receives the + /// predicate to stop early, and cached items are checked before appending. + /// When `None`, all items in the range are collected unconditionally. + pub(super) fn collect_cached_block_range( &self, start: BlockNumber, end: BlockNumber, @@ -143,7 +104,7 @@ impl> FlashblockStateCache, &mut dyn FnMut(&T) -> bool, ) -> ProviderResult>, - predicate: &mut dyn FnMut(&T) -> bool, + predicate: Option<&mut dyn FnMut(&T) -> bool>, ) -> ProviderResult> { let inner = self.inner.read(); let mut cached_bars = Vec::new(); @@ -164,13 +125,15 @@ impl> FlashblockStateCache= start && cached_bars.len() < (end - start + 1) as usize; + + let mut always_true = |_: &T| true; + let predicate = predicate.unwrap_or(&mut always_true); + let mut predicate_stopped = false; let mut result = if has_provider_range { let items = from_provider(start..=provider_end, predicate)?; - // If the provider returned fewer items than the full range, the predicate stopped let expected = (provider_end - start + 1) as usize; if items.len() < expected { predicate_stopped = true; @@ -180,7 +143,6 @@ impl> FlashblockStateCache> TransactionsProvider start, end, |bar| bar.block.body().transactions().to_vec(), - |r| self.provider.transactions_by_block_range(r), + |r, _| self.provider.transactions_by_block_range(r), + None, ) } From e25008c89b18367e754863c94908a7cdf109610b Mon Sep 17 00:00:00 2001 From: Niven Date: Wed, 11 Mar 2026 15:50:53 +0800 Subject: [PATCH 12/76] chore(flashblocks-rpc): refactor cache, add tx index to pending sequence MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude Opus 4.6 --- crates/flashblocks/src/cache/confirm.rs | 46 ++-- crates/flashblocks/src/cache/mod.rs | 277 +++++++++++++++++++- crates/flashblocks/src/cache/pending.rs | 14 +- crates/flashblocks/src/cache/receipt.rs | 2 +- crates/flashblocks/src/cache/state.rs | 242 ----------------- crates/flashblocks/src/cache/transaction.rs | 17 +- 6 files changed, 312 insertions(+), 286 deletions(-) delete mode 100644 crates/flashblocks/src/cache/state.rs diff --git a/crates/flashblocks/src/cache/confirm.rs b/crates/flashblocks/src/cache/confirm.rs index 4bbd9c7d..9e9406b8 100644 --- a/crates/flashblocks/src/cache/confirm.rs +++ b/crates/flashblocks/src/cache/confirm.rs @@ -1,32 +1,14 @@ -use std::{ - collections::{BTreeMap, HashMap}, - sync::Arc, -}; +use crate::cache::CachedTxInfo; +use std::collections::{BTreeMap, HashMap}; use alloy_consensus::transaction::TxHashRef; use alloy_primitives::{TxHash, B256}; use eyre::eyre; -use reth_primitives_traits::{BlockBody, NodePrimitives, ReceiptTy}; +use reth_primitives_traits::{BlockBody, NodePrimitives}; use reth_rpc_eth_types::block::BlockAndReceipts; const DEFAULT_CONFIRM_CACHE_SIZE: usize = 1_000; -/// Cached transaction info (block context, receipt and tx data) for O(1) lookups -/// by transaction hash. -#[derive(Debug, Clone)] -pub struct CachedTxInfo { - /// Block number containing the transaction. - pub block_number: u64, - /// Block hash containing the transaction. - pub block_hash: B256, - /// Index of the transaction within the block. - pub tx_index: u64, - /// The signed transaction. - pub tx: N::SignedTx, - /// The corresponding receipt. - pub receipt: ReceiptTy, -} - /// Confirmed flashblocks sequence cache that is ahead of the current node's canonical /// chainstate. We optimistically commit confirmed flashblocks sequences to the cache /// and flush them when the canonical chainstate catches up. @@ -45,7 +27,7 @@ pub struct ConfirmCache { /// Reverse index: block hash → block number for O(1) hash-based lookups. hash_to_number: HashMap, /// Transaction index: tx hash → cached tx info for O(1) tx/receipt lookups. - tx_index: HashMap>>, + tx_index: HashMap>, } impl Default for ConfirmCache { @@ -95,14 +77,16 @@ impl ConfirmCache { let receipts = block.receipts.as_ref(); for (idx, (tx, receipt)) in txs.iter().zip(receipts.iter()).enumerate() { let tx_hash = *tx.tx_hash(); - let info = Arc::new(CachedTxInfo { - block_number: height, - block_hash: hash, - tx_index: idx as u64, - tx: tx.clone(), - receipt: receipt.clone(), - }); - self.tx_index.insert(tx_hash, info); + self.tx_index.insert( + tx_hash, + CachedTxInfo { + block_number: height, + block_hash: hash, + tx_index: idx as u64, + tx: tx.clone(), + receipt: receipt.clone(), + }, + ); } // Build block index entries for block data @@ -139,7 +123,7 @@ impl ConfirmCache { } /// Returns the cached transaction info for the given tx hash, if present. - pub fn get_tx_info(&self, tx_hash: &TxHash) -> Option>> { + pub fn get_tx_info(&self, tx_hash: &TxHash) -> Option> { self.tx_index.get(tx_hash).cloned() } diff --git a/crates/flashblocks/src/cache/mod.rs b/crates/flashblocks/src/cache/mod.rs index 5167acea..85fb43fa 100644 --- a/crates/flashblocks/src/cache/mod.rs +++ b/crates/flashblocks/src/cache/mod.rs @@ -6,13 +6,278 @@ mod id; mod pending; pub(crate) mod raw; mod receipt; -mod state; mod transaction; mod utils; -pub(crate) use confirm::ConfirmCache; -pub(crate) use pending::PendingSequence; -pub(crate) use raw::RawFlashblocksCache; -pub(crate) use utils::{block_from_bar, StateCacheProvider}; +pub use raw::RawFlashblocksCache; -pub use state::FlashblockStateCache; +use confirm::ConfirmCache; +use pending::PendingSequence; +use utils::{block_from_bar, StateCacheProvider}; + +use core::ops::RangeBounds; +use parking_lot::RwLock; +use std::sync::Arc; + +use alloy_consensus::BlockHeader; +use alloy_primitives::{BlockNumber, TxHash, B256}; +use reth_primitives_traits::{NodePrimitives, ReceiptTy}; +use reth_rpc_eth_types::block::BlockAndReceipts; +use reth_storage_api::{errors::provider::ProviderResult, BlockNumReader}; + +/// Cached transaction info (block context, receipt and tx data) for O(1) lookups +/// by transaction hash. +#[derive(Debug, Clone)] +pub struct CachedTxInfo { + /// Block number containing the transaction. + pub block_number: u64, + /// Block hash containing the transaction. + pub block_hash: B256, + /// Index of the transaction within the block. + pub tx_index: u64, + /// The signed transaction. + pub tx: N::SignedTx, + /// The corresponding receipt. + pub receipt: ReceiptTy, +} + +/// Top-level controller state cache for the flashblocks RPC layer. +/// +/// Composed of: +/// - **Pending**: the in-progress flashblock sequence being built from incoming +/// `OpFlashblockPayload` deltas (at most one active sequence at a time). +/// - **Confirmed**: completed flashblock sequences that have been committed but +/// are still ahead of the canonical chain. +/// +/// Implements all reth provider traits using the flashblocks state cache layer +/// as an overlay on top of the underlying chainstate `Provider`. +/// (`BlockReaderIdExt`, `StateProviderFactory`, etc.) +/// +/// **Lookup strategy:** +/// - **Confirmed state** (by hash/number): Check the flashblocks state cache +/// layer first, then fall back to the chainstate provider. +/// - **Latest**: Compare the flashblocks state cache's highest height vs the +/// chainstate provider's best height. Return whichever is higher, on tie we +/// prefer the chainstate provider. +/// - **Pending**: Returns the pending state from the flashblocks state cache. +/// - **All other IDs** (safe, finalized, historical, index-based): delegate +/// directly to the chainstate provider. +/// +/// Uses `Arc` for thread safety — a single lock protects all inner +/// state, ensuring atomic operations across pending, confirmed, and height +/// state (e.g. reorg detection + flush + insert in `handle_confirmed_block`). +#[derive(Debug, Clone)] +pub struct FlashblockStateCache { + pub(super) inner: Arc>>, + pub(super) provider: Provider, +} + +impl> FlashblockStateCache { + /// Creates a new [`FlashblockStateCache`]. + pub fn new(provider: Provider) -> eyre::Result { + let canon_height = provider.best_block_number()?; + Ok(Self { + inner: Arc::new(RwLock::new(FlashblockStateCacheInner::new(canon_height))), + provider, + }) + } + + /// Returns a reference to the underlying chainstate provider. + pub const fn provider(&self) -> &Provider { + &self.provider + } + + /// Handles a newly confirmed block by detecting reorgs, flushing invalidated + /// entries, and inserting into the confirm blocks cache. + pub fn handle_confirmed_block( + &self, + block_number: u64, + block_hash: B256, + block: BlockAndReceipts, + ) -> eyre::Result<()> { + self.inner.write().handle_confirmed_block(block_number, block_hash, block) + } + + /// Handles updating the pending state with a newly executed pending flashblocks + /// sequence. Note that it will replace any existing pending sequence. + pub fn handle_pending_sequence(&self, pending_sequence: PendingSequence) { + self.inner.write().handle_pending_sequence(pending_sequence) + } + + pub fn handle_canonical_block(&self, block_number: u64, block_hash: B256) { + self.inner.write().handle_canonical_block(block_number, block_hash) + } + + /// Returns the current confirmed cache height, if any blocks have been confirmed. + pub fn get_confirm_height(&self) -> Option { + self.inner.read().confirm_height + } + + /// Returns the current pending height, if any flashblocks have been executed. + pub fn get_pending_height(&self) -> Option { + self.inner.read().pending.as_ref().map(|p| p.pending.block().number()) + } + + /// Collects items from an inclusive block number range `[start..=end]`, using + /// the confirm cache as an overlay on top of the provider. + /// + /// Walks backward from `end`, collecting consecutive cache hits via `from_cache`. + /// Delegates the remaining prefix `[start..=provider_end]` to `from_provider`. + /// + /// When `predicate` is `Some`, items are filtered: the provider receives the + /// predicate to stop early, and cached items are checked before appending. + /// When `None`, all items in the range are collected unconditionally. + pub(super) fn collect_cached_block_range( + &self, + start: BlockNumber, + end: BlockNumber, + from_cache: impl Fn(&BlockAndReceipts) -> T, + from_provider: impl FnOnce( + core::ops::RangeInclusive, + &mut dyn FnMut(&T) -> bool, + ) -> ProviderResult>, + predicate: Option<&mut dyn FnMut(&T) -> bool>, + ) -> ProviderResult> { + let inner = self.inner.read(); + let mut cached_bars = Vec::new(); + let mut provider_end = end; + let mut index = end; + loop { + if let Some(bar) = inner.confirm_cache.get_block_by_number(index) { + cached_bars.push(bar); + provider_end = index.saturating_sub(1); + } else { + break; + } + if index == start { + break; + } + index -= 1; + } + cached_bars.reverse(); + drop(inner); + + let has_provider_range = + provider_end >= start && cached_bars.len() < (end - start + 1) as usize; + + let mut always_true = |_: &T| true; + let predicate = predicate.unwrap_or(&mut always_true); + + let mut predicate_stopped = false; + let mut result = if has_provider_range { + let items = from_provider(start..=provider_end, predicate)?; + let expected = (provider_end - start + 1) as usize; + if items.len() < expected { + predicate_stopped = true; + } + items + } else { + Vec::new() + }; + + if !predicate_stopped { + for bar in &cached_bars { + let item = from_cache(bar); + if !predicate(&item) { + break; + } + result.push(item); + } + } + + Ok(result) + } + + /// Resolves an `impl RangeBounds` into an inclusive `(start, end)` pair. + /// Matches reth's blockchain provider's convert_range_bounds semantics, and unbounded + /// ends are resolved to `best_block_number`. + pub(super) fn resolve_range_bounds( + &self, + range: impl RangeBounds, + ) -> ProviderResult<(BlockNumber, BlockNumber)> { + let start = match range.start_bound() { + core::ops::Bound::Included(&n) => n, + core::ops::Bound::Excluded(&n) => n + 1, + core::ops::Bound::Unbounded => 0, + }; + let end = match range.end_bound() { + core::ops::Bound::Included(&n) => n, + core::ops::Bound::Excluded(&n) => n - 1, + core::ops::Bound::Unbounded => self.best_block_number()?, + }; + Ok((start, end)) + } +} + +/// Inner state of the flashblocks state cache. +#[derive(Debug)] +pub(super) struct FlashblockStateCacheInner { + /// The current in-progress pending flashblock sequence, if any. + pub(super) pending: Option>, + /// Cache of confirmed flashblock sequences ahead of the canonical chain. + pub(super) confirm_cache: ConfirmCache, + /// The highest confirmed block height. + pub(super) confirm_height: Option, + /// The highest canonical block height. + pub(super) canon_height: u64, +} + +impl FlashblockStateCacheInner { + fn new(canon_height: u64) -> Self { + Self { + pending: None, + confirm_cache: ConfirmCache::new(), + confirm_height: None, + canon_height, + } + } + + /// Handles a newly confirmed block with reorg detection. + fn handle_confirmed_block( + &mut self, + block_number: u64, + block_hash: B256, + block: BlockAndReceipts, + ) -> eyre::Result<()> { + // Validation checks + if let Some(confirm_height) = self.confirm_height { + if block_number <= confirm_height { + // Reorg detected - confirm cache is polluted + return Err(eyre::eyre!( + "polluted state cache - trying to commit lower confirm height block" + )); + } + if block_number != confirm_height + 1 { + return Err(eyre::eyre!( + "polluted state cache - not next consecutive confirm height block" + )); + } + } + + // Commit new confirmed block to state cache + self.confirm_height = Some(block_number); + self.confirm_cache.insert(block_number, block_hash, block)?; + Ok(()) + } + + /// Looks up cached transaction info by hash: pending sequence first, then + /// confirm cache. Returns `None` if the tx is not in either cache layer. + pub(super) fn get_tx_info(&self, tx_hash: &TxHash) -> Option> { + self.pending + .as_ref() + .and_then(|p| p.get_tx_info(tx_hash)) + .or_else(|| self.confirm_cache.get_tx_info(tx_hash)) + } + + fn handle_pending_sequence(&mut self, pending_sequence: PendingSequence) { + self.pending = Some(pending_sequence); + } + + fn handle_canonical_block(&mut self, block_number: u64, block_hash: B256) { + self.canon_height = block_number; + self.confirm_cache.flush_up_to(block_number); + if self.pending.as_ref().and_then(|p| p.block_hash) == Some(block_hash) { + self.pending = None; + } + } +} diff --git a/crates/flashblocks/src/cache/pending.rs b/crates/flashblocks/src/cache/pending.rs index d940416a..20d69b96 100644 --- a/crates/flashblocks/src/cache/pending.rs +++ b/crates/flashblocks/src/cache/pending.rs @@ -1,7 +1,9 @@ +use crate::cache::CachedTxInfo; use derive_more::Deref; +use std::collections::HashMap; use alloy_consensus::BlockHeader; -use alloy_primitives::B256; +use alloy_primitives::{TxHash, B256}; use reth_primitives_traits::NodePrimitives; use reth_revm::cached::CachedReads; use reth_rpc_eth_types::PendingBlock; @@ -13,6 +15,8 @@ pub struct PendingSequence { /// Locally built full pending block of the latest flashblocks sequence. #[deref] pub pending: PendingBlock, + /// Transaction index: tx hash → cached tx info for O(1) tx/receipt lookups. + tx_index: HashMap>, /// Cached reads from execution for reuse. pub cached_reads: CachedReads, /// Parent hash of the built block (may be non-canonical or canonical). @@ -30,12 +34,14 @@ impl PendingSequence { /// Create new pending flashblock. pub const fn new( pending: PendingBlock, + tx_index: HashMap>, cached_reads: CachedReads, parent_hash: B256, last_flashblock_index: u64, ) -> Self { Self { pending, + tx_index, cached_reads, parent_hash, last_flashblock_index, @@ -55,6 +61,12 @@ impl PendingSequence { self.block_hash = Some(self.pending.block().hash()); self.has_computed_state_root = true; } + + /// Returns the cached transaction info for the given tx hash, if present + /// in the pending sequence. + pub fn get_tx_info(&self, tx_hash: &TxHash) -> Option> { + self.tx_index.get(tx_hash).cloned() + } } #[cfg(test)] diff --git a/crates/flashblocks/src/cache/receipt.rs b/crates/flashblocks/src/cache/receipt.rs index ebfbe67d..1a29297f 100644 --- a/crates/flashblocks/src/cache/receipt.rs +++ b/crates/flashblocks/src/cache/receipt.rs @@ -16,7 +16,7 @@ impl> ReceiptProvider } fn receipt_by_hash(&self, hash: TxHash) -> ProviderResult> { - if let Some(info) = self.inner.read().confirm_cache.get_tx_info(&hash) { + if let Some(info) = self.inner.read().get_tx_info(&hash) { return Ok(Some(info.receipt.clone())); } self.provider.receipt_by_hash(hash) diff --git a/crates/flashblocks/src/cache/state.rs b/crates/flashblocks/src/cache/state.rs deleted file mode 100644 index a8f59176..00000000 --- a/crates/flashblocks/src/cache/state.rs +++ /dev/null @@ -1,242 +0,0 @@ -use crate::cache::{ConfirmCache, PendingSequence, StateCacheProvider}; -use core::ops::RangeBounds; -use parking_lot::RwLock; -use std::sync::Arc; - -use alloy_consensus::BlockHeader; -use alloy_primitives::{BlockNumber, B256}; -use reth_primitives_traits::NodePrimitives; -use reth_rpc_eth_types::block::BlockAndReceipts; -use reth_storage_api::{errors::provider::ProviderResult, BlockNumReader}; - -/// Top-level controller state cache for the flashblocks RPC layer. -/// -/// Composed of: -/// - **Pending**: the in-progress flashblock sequence being built from incoming -/// `OpFlashblockPayload` deltas (at most one active sequence at a time). -/// - **Confirmed**: completed flashblock sequences that have been committed but -/// are still ahead of the canonical chain. -/// -/// Implements all reth provider traits using the flashblocks state cache layer -/// as an overlay on top of the underlying chainstate `Provider`. -/// (`BlockReaderIdExt`, `StateProviderFactory`, etc.) -/// -/// **Lookup strategy:** -/// - **Confirmed state** (by hash/number): Check the flashblocks state cache -/// layer first, then fall back to the chainstate provider. -/// - **Latest**: Compare the flashblocks state cache's highest height vs the -/// chainstate provider's best height. Return whichever is higher, on tie we -/// prefer the chainstate provider. -/// - **Pending**: Returns the pending state from the flashblocks state cache. -/// - **All other IDs** (safe, finalized, historical, index-based): delegate -/// directly to the chainstate provider. -/// -/// Uses `Arc` for thread safety — a single lock protects all inner -/// state, ensuring atomic operations across pending, confirmed, and height -/// state (e.g. reorg detection + flush + insert in `handle_confirmed_block`). -#[derive(Debug, Clone)] -pub struct FlashblockStateCache { - pub(super) inner: Arc>>, - pub(super) provider: Provider, -} - -impl> FlashblockStateCache { - /// Creates a new [`FlashblockStateCache`]. - pub fn new(provider: Provider) -> eyre::Result { - let canon_height = provider.best_block_number()?; - Ok(Self { - inner: Arc::new(RwLock::new(FlashblockStateCacheInner::new(canon_height))), - provider, - }) - } - - /// Returns a reference to the underlying chainstate provider. - pub const fn provider(&self) -> &Provider { - &self.provider - } - - /// Handles a newly confirmed block by detecting reorgs, flushing invalidated - /// entries, and inserting into the confirm blocks cache. - pub fn handle_confirmed_block( - &self, - block_number: u64, - block_hash: B256, - block: BlockAndReceipts, - ) -> eyre::Result<()> { - self.inner.write().handle_confirmed_block(block_number, block_hash, block) - } - - /// Handles updating the pending state with a newly executed pending flashblocks - /// sequence. Note that it will replace any existing pending sequence. - pub fn handle_pending_sequence(&self, pending_sequence: PendingSequence) { - self.inner.write().handle_pending_sequence(pending_sequence) - } - - pub fn handle_canonical_block(&self, block_number: u64, block_hash: B256) { - self.inner.write().handle_canonical_block(block_number, block_hash) - } - - /// Returns the current confirmed cache height, if any blocks have been confirmed. - pub fn get_confirm_height(&self) -> Option { - self.inner.read().confirm_height - } - - /// Returns the current pending height, if any flashblocks have been executed. - pub fn get_pending_height(&self) -> Option { - self.inner.read().pending.as_ref().map(|p| p.pending.block().number()) - } - - /// Collects items from an inclusive block number range `[start..=end]`, using - /// the confirm cache as an overlay on top of the provider. - /// - /// Walks backward from `end`, collecting consecutive cache hits via `from_cache`. - /// Delegates the remaining prefix `[start..=provider_end]` to `from_provider`. - /// - /// When `predicate` is `Some`, items are filtered: the provider receives the - /// predicate to stop early, and cached items are checked before appending. - /// When `None`, all items in the range are collected unconditionally. - pub(super) fn collect_cached_block_range( - &self, - start: BlockNumber, - end: BlockNumber, - from_cache: impl Fn(&BlockAndReceipts) -> T, - from_provider: impl FnOnce( - core::ops::RangeInclusive, - &mut dyn FnMut(&T) -> bool, - ) -> ProviderResult>, - predicate: Option<&mut dyn FnMut(&T) -> bool>, - ) -> ProviderResult> { - let inner = self.inner.read(); - let mut cached_bars = Vec::new(); - let mut provider_end = end; - let mut index = end; - loop { - if let Some(bar) = inner.confirm_cache.get_block_by_number(index) { - cached_bars.push(bar); - provider_end = index.saturating_sub(1); - } else { - break; - } - if index == start { - break; - } - index -= 1; - } - cached_bars.reverse(); - drop(inner); - - let has_provider_range = - provider_end >= start && cached_bars.len() < (end - start + 1) as usize; - - let mut always_true = |_: &T| true; - let predicate = predicate.unwrap_or(&mut always_true); - - let mut predicate_stopped = false; - let mut result = if has_provider_range { - let items = from_provider(start..=provider_end, predicate)?; - let expected = (provider_end - start + 1) as usize; - if items.len() < expected { - predicate_stopped = true; - } - items - } else { - Vec::new() - }; - - if !predicate_stopped { - for bar in &cached_bars { - let item = from_cache(bar); - if !predicate(&item) { - break; - } - result.push(item); - } - } - - Ok(result) - } - - /// Resolves an `impl RangeBounds` into an inclusive `(start, end)` pair. - /// Matches reth's blockchain provider's convert_range_bounds semantics, and unbounded - /// ends are resolved to `best_block_number`. - pub(super) fn resolve_range_bounds( - &self, - range: impl RangeBounds, - ) -> ProviderResult<(BlockNumber, BlockNumber)> { - let start = match range.start_bound() { - core::ops::Bound::Included(&n) => n, - core::ops::Bound::Excluded(&n) => n + 1, - core::ops::Bound::Unbounded => 0, - }; - let end = match range.end_bound() { - core::ops::Bound::Included(&n) => n, - core::ops::Bound::Excluded(&n) => n - 1, - core::ops::Bound::Unbounded => self.best_block_number()?, - }; - Ok((start, end)) - } -} - -/// Inner state of the flashblocks state cache. -#[derive(Debug)] -pub(super) struct FlashblockStateCacheInner { - /// The current in-progress pending flashblock sequence, if any. - pub(super) pending: Option>, - /// Cache of confirmed flashblock sequences ahead of the canonical chain. - pub(super) confirm_cache: ConfirmCache, - /// The highest confirmed block height. - pub(super) confirm_height: Option, - /// The highest canonical block height. - pub(super) canon_height: u64, -} - -impl FlashblockStateCacheInner { - fn new(canon_height: u64) -> Self { - Self { - pending: None, - confirm_cache: ConfirmCache::new(), - confirm_height: None, - canon_height, - } - } - - /// Handles a newly confirmed block with reorg detection. - fn handle_confirmed_block( - &mut self, - block_number: u64, - block_hash: B256, - block: BlockAndReceipts, - ) -> eyre::Result<()> { - // Validation checks - if let Some(confirm_height) = self.confirm_height { - if block_number <= confirm_height { - // Reorg detected - confirm cache is polluted - return Err(eyre::eyre!( - "polluted state cache - trying to commit lower confirm height block" - )); - } - if block_number != confirm_height + 1 { - return Err(eyre::eyre!( - "polluted state cache - not next consecutive confirm height block" - )); - } - } - - // Commit new confirmed block to state cache - self.confirm_height = Some(block_number); - self.confirm_cache.insert(block_number, block_hash, block)?; - Ok(()) - } - - fn handle_pending_sequence(&mut self, pending_sequence: PendingSequence) { - self.pending = Some(pending_sequence); - } - - fn handle_canonical_block(&mut self, block_number: u64, block_hash: B256) { - self.canon_height = block_number; - self.confirm_cache.flush_up_to(block_number); - if self.pending.as_ref().and_then(|p| p.block_hash) == Some(block_hash) { - self.pending = None; - } - } -} diff --git a/crates/flashblocks/src/cache/transaction.rs b/crates/flashblocks/src/cache/transaction.rs index 5d097f3d..c50405f0 100644 --- a/crates/flashblocks/src/cache/transaction.rs +++ b/crates/flashblocks/src/cache/transaction.rs @@ -28,7 +28,7 @@ impl> TransactionsProvider } fn transaction_by_hash(&self, hash: TxHash) -> ProviderResult> { - if let Some(info) = self.inner.read().confirm_cache.get_tx_info(&hash) { + if let Some(info) = self.inner.read().get_tx_info(&hash) { return Ok(Some(info.tx.clone())); } self.provider.transaction_by_hash(hash) @@ -39,14 +39,21 @@ impl> TransactionsProvider hash: TxHash, ) -> ProviderResult> { let inner = self.inner.read(); - if let Some(info) = inner.confirm_cache.get_tx_info(&hash) { - // Resolve block header fields from the confirm cache - let bar = inner.confirm_cache.get_block_by_number(info.block_number); - let (base_fee, excess_blob_gas, timestamp) = bar + if let Some(info) = inner.get_tx_info(&hash) { + // Resolve block header fields: try confirm cache first, then pending block + let (base_fee, excess_blob_gas, timestamp) = inner + .confirm_cache + .get_block_by_number(info.block_number) .map(|b| { let h = b.block.header(); (h.base_fee_per_gas(), h.excess_blob_gas(), h.timestamp()) }) + .or_else(|| { + inner.pending.as_ref().map(|p| { + let h = p.pending.block().header(); + (h.base_fee_per_gas(), h.excess_blob_gas(), h.timestamp()) + }) + }) .unwrap_or_default(); let meta = TransactionMeta { From e94f96901c3d76675f50aa408e1858a0c73ed510 Mon Sep 17 00:00:00 2001 From: Niven Date: Wed, 11 Mar 2026 17:58:51 +0800 Subject: [PATCH 13/76] refactor(flashblocks-rpc): move provider impls to eth api override in rpc crate MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Consolidates flashblocks cache provider trait implementations (block, header, factory, receipt, transaction) into a single eth.rs override module in the rpc crate. Wires FlashblockStateCache into the node's RPC registration. Simplifies the cache layer to only manage state. 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude Opus 4.6 --- Cargo.lock | 13 +- bin/node/src/main.rs | 37 +- crates/flashblocks/src/cache/block.rs | 177 ----- crates/flashblocks/src/cache/confirm.rs | 20 +- crates/flashblocks/src/cache/factory.rs | 77 -- crates/flashblocks/src/cache/header.rs | 74 -- crates/flashblocks/src/cache/id.rs | 103 --- crates/flashblocks/src/cache/mod.rs | 371 +++++---- crates/flashblocks/src/cache/pending.rs | 10 +- crates/flashblocks/src/cache/receipt.rs | 67 -- crates/flashblocks/src/cache/transaction.rs | 126 ---- crates/flashblocks/src/cache/utils.rs | 30 +- crates/rpc/Cargo.toml | 23 +- crates/rpc/src/eth.rs | 796 ++++++++++++++++++++ crates/rpc/src/lib.rs | 22 +- crates/rpc/src/xlayer_ext.rs | 109 +-- 16 files changed, 1094 insertions(+), 961 deletions(-) delete mode 100644 crates/flashblocks/src/cache/block.rs delete mode 100644 crates/flashblocks/src/cache/factory.rs delete mode 100644 crates/flashblocks/src/cache/header.rs delete mode 100644 crates/flashblocks/src/cache/id.rs delete mode 100644 crates/flashblocks/src/cache/receipt.rs delete mode 100644 crates/flashblocks/src/cache/transaction.rs create mode 100644 crates/rpc/src/eth.rs diff --git a/Cargo.lock b/Cargo.lock index 2dd0d503..4b76140b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -14410,12 +14410,23 @@ dependencies = [ name = "xlayer-rpc" version = "0.1.0" dependencies = [ + "alloy-consensus", + "alloy-eips", + "alloy-primitives", + "alloy-rpc-types-eth", + "async-trait", "jsonrpsee", + "op-alloy-network", + "reth-optimism-primitives", "reth-optimism-rpc", + "reth-primitives-traits", "reth-rpc", + "reth-rpc-convert", "reth-rpc-eth-api", - "serde", + "reth-rpc-eth-types", "tokio", + "tracing", + "xlayer-flashblocks", ] [[package]] diff --git a/bin/node/src/main.rs b/bin/node/src/main.rs index 41cb4ad7..8ac929e1 100644 --- a/bin/node/src/main.rs +++ b/bin/node/src/main.rs @@ -11,12 +11,12 @@ use either::Either; use std::sync::Arc; use tracing::info; -use op_alloy_network::Optimism; use reth::rpc::eth::EthApiTypes; use reth::{ builder::{DebugNodeLauncher, EngineNodeLauncher, Node, NodeHandle, TreeConfig}, providers::providers::BlockchainProvider, }; +use reth::providers::BlockNumReader; use reth_node_api::FullNodeComponents; use reth_optimism_cli::Cli; use reth_optimism_node::{args::RollupArgs, OpNode}; @@ -26,7 +26,10 @@ use xlayer_chainspec::XLayerChainSpecParser; use xlayer_flashblocks::{handle::FlashblocksService, subscription::FlashblocksPubSub}; use xlayer_legacy_rpc::{layer::LegacyRpcRouterLayer, LegacyRpcRouterConfig}; use xlayer_monitor::{start_monitor_handle, RpcMonitorLayer, XLayerMonitor}; -use xlayer_rpc::xlayer_ext::{XlayerRpcExt, XlayerRpcExtApiServer}; +use xlayer_flashblocks::cache::FlashblockStateCache; +use xlayer_rpc::{ + EthApiOverrideServer, XLayerEthApiExt, XlayerRpcExt, XlayerRpcExtApiServer, +}; #[global_allocator] static ALLOC: reth_cli_util::allocator::Allocator = reth_cli_util::allocator::new_allocator(); @@ -156,9 +159,33 @@ fn main() { } } - // Register X Layer RPC - let xlayer_rpc = XlayerRpcExt { backend: new_op_eth_api }; - ctx.modules.merge_configured(XlayerRpcExtApiServer::::into_rpc( + // Create flashblocks state cache if flashblocks URL is configured. + // Shared between the Eth API override and the ext RPC. + let flash_cache = if args.rollup_args.flashblocks_url.is_some() { + let canon_height = ctx.node().provider().best_block_number()?; + Some(FlashblockStateCache::new(canon_height)) + } else { + None + }; + + // Register flashblocks Eth API override (replaces subset of eth_ methods) + if let Some(ref cache) = flash_cache { + let eth_filter = ctx.registry.eth_handlers().filter.clone(); + let eth_override = XLayerEthApiExt::new( + ctx.registry.eth_api().clone(), + eth_filter, + cache.clone(), + ); + ctx.modules.add_or_replace_if_module_configured( + RethRpcModule::Eth, + EthApiOverrideServer::into_rpc(eth_override), + )?; + info!(target: "reth::cli", "xlayer flashblocks eth api override enabled"); + } + + // Register X Layer RPC (eth_flashblocksEnabled) — always active + let xlayer_rpc = XlayerRpcExt::new(flash_cache); + ctx.modules.merge_configured(XlayerRpcExtApiServer::into_rpc( xlayer_rpc, ))?; info!(target: "reth::cli", "xlayer rpc extension enabled"); diff --git a/crates/flashblocks/src/cache/block.rs b/crates/flashblocks/src/cache/block.rs deleted file mode 100644 index 6f7872cf..00000000 --- a/crates/flashblocks/src/cache/block.rs +++ /dev/null @@ -1,177 +0,0 @@ -use crate::cache::{block_from_bar, FlashblockStateCache, StateCacheProvider}; - -use alloy_eips::{BlockHashOrNumber, BlockId}; -use alloy_primitives::{BlockNumber, TxNumber, B256}; -use core::ops::RangeInclusive; -use reth_db_models::StoredBlockBodyIndices; -use reth_primitives_traits::{BlockTy, NodePrimitives, RecoveredBlock, SealedHeader}; -use reth_storage_api::{ - errors::provider::ProviderResult, BlockBodyIndicesProvider, BlockReader, BlockReaderIdExt, - BlockSource, HeaderProvider, TransactionVariant, -}; - -impl> BlockReader - for FlashblockStateCache -{ - type Block = BlockTy; - - fn find_block_by_hash( - &self, - hash: B256, - source: BlockSource, - ) -> ProviderResult> { - if let Some(bar) = self.inner.read().confirm_cache.get_block_by_hash(&hash) { - return Ok(Some(block_from_bar(&bar))); - } - self.provider.find_block_by_hash(hash, source) - } - - fn block(&self, id: BlockHashOrNumber) -> ProviderResult> { - let cached = match id { - BlockHashOrNumber::Hash(hash) => { - self.inner.read().confirm_cache.get_block_by_hash(&hash) - } - BlockHashOrNumber::Number(num) => { - self.inner.read().confirm_cache.get_block_by_number(num) - } - }; - if let Some(bar) = cached { - return Ok(Some(block_from_bar(&bar))); - } - self.provider.block(id) - } - - fn pending_block(&self) -> ProviderResult>> { - { - let inner = self.inner.read(); - if let Some(pending) = &inner.pending { - return Ok(Some(pending.pending.block().as_ref().clone())); - } - } - self.provider.pending_block() - } - - fn pending_block_and_receipts( - &self, - ) -> ProviderResult, Vec)>> { - { - let inner = self.inner.read(); - if let Some(pending) = &inner.pending { - let block = pending.pending.block().as_ref().clone(); - let receipts = pending.pending.receipts.as_ref().clone(); - return Ok(Some((block, receipts))); - } - } - self.provider.pending_block_and_receipts() - } - - fn recovered_block( - &self, - id: BlockHashOrNumber, - transaction_kind: TransactionVariant, - ) -> ProviderResult>> { - let cached = match id { - BlockHashOrNumber::Hash(hash) => { - self.inner.read().confirm_cache.get_block_by_hash(&hash) - } - BlockHashOrNumber::Number(num) => { - self.inner.read().confirm_cache.get_block_by_number(num) - } - }; - if let Some(bar) = cached { - return Ok(Some((*bar.block).clone())); - } - self.provider.recovered_block(id, transaction_kind) - } - - fn sealed_block_with_senders( - &self, - id: BlockHashOrNumber, - transaction_kind: TransactionVariant, - ) -> ProviderResult>> { - self.recovered_block(id, transaction_kind) - } - - fn block_range(&self, range: RangeInclusive) -> ProviderResult> { - self.collect_cached_block_range( - *range.start(), - *range.end(), - |bar| block_from_bar(bar), - |r, _| self.provider.block_range(r), - None, - ) - } - - fn block_with_senders_range( - &self, - range: RangeInclusive, - ) -> ProviderResult>> { - self.collect_cached_block_range( - *range.start(), - *range.end(), - |bar| (*bar.block).clone(), - |r, _| self.provider.block_with_senders_range(r), - None, - ) - } - - fn recovered_block_range( - &self, - range: RangeInclusive, - ) -> ProviderResult>> { - self.collect_cached_block_range( - *range.start(), - *range.end(), - |bar| (*bar.block).clone(), - |r, _| self.provider.recovered_block_range(r), - None, - ) - } - - fn block_by_transaction_id(&self, id: TxNumber) -> ProviderResult> { - self.provider.block_by_transaction_id(id) - } -} - -impl> BlockReaderIdExt - for FlashblockStateCache -{ - fn block_by_id(&self, id: BlockId) -> ProviderResult> { - match id { - BlockId::Hash(hash) => self.block_by_hash(hash.into()), - BlockId::Number(num) => self.block_by_number_or_tag(num), - } - } - - fn sealed_header_by_id( - &self, - id: BlockId, - ) -> ProviderResult>> { - match id { - BlockId::Hash(hash) => self.sealed_header_by_hash(hash.into()), - BlockId::Number(tag) => self.sealed_header_by_number_or_tag(tag), - } - } - - fn header_by_id(&self, id: BlockId) -> ProviderResult> { - match id { - BlockId::Hash(hash) => self.header(hash.into()), - BlockId::Number(num) => self.header_by_number_or_tag(num), - } - } -} - -impl> BlockBodyIndicesProvider - for FlashblockStateCache -{ - fn block_body_indices(&self, num: u64) -> ProviderResult> { - self.provider.block_body_indices(num) - } - - fn block_body_indices_range( - &self, - range: RangeInclusive, - ) -> ProviderResult> { - self.provider.block_body_indices_range(range) - } -} diff --git a/crates/flashblocks/src/cache/confirm.rs b/crates/flashblocks/src/cache/confirm.rs index 9e9406b8..692667a7 100644 --- a/crates/flashblocks/src/cache/confirm.rs +++ b/crates/flashblocks/src/cache/confirm.rs @@ -60,12 +60,7 @@ impl ConfirmCache { /// before inserting if a reorg is detected. /// /// Returns an error if the cache is at max capacity. - pub fn insert( - &mut self, - height: u64, - hash: B256, - block: BlockAndReceipts, - ) -> eyre::Result<()> { + pub fn insert(&mut self, height: u64, block: BlockAndReceipts) -> eyre::Result<()> { if self.blocks.len() >= DEFAULT_CONFIRM_CACHE_SIZE { return Err(eyre!( "confirm cache at max capacity ({DEFAULT_CONFIRM_CACHE_SIZE}), cannot insert block: {height}" @@ -73,6 +68,7 @@ impl ConfirmCache { } // Build tx index entries for all transactions in this block + let hash = block.block.hash(); let txs = block.block.body().transactions(); let receipts = block.receipts.as_ref(); for (idx, (tx, receipt)) in txs.iter().zip(receipts.iter()).enumerate() { @@ -160,18 +156,6 @@ impl ConfirmCache { } } - /// Flushes all entries with block number >= `from` (the reorged range). - /// Returns the number of entries flushed. - pub fn flush_from(&mut self, from: u64) -> usize { - let reorged = self.blocks.split_off(&from); - let count = reorged.len(); - for (hash, bar) in reorged.into_values() { - self.hash_to_number.remove(&hash); - self.remove_tx_index_for_block(&bar); - } - count - } - /// Flushes all entries with block number <= `canonical_number`. /// /// Called when the canonical chain catches up to the confirmed cache. diff --git a/crates/flashblocks/src/cache/factory.rs b/crates/flashblocks/src/cache/factory.rs deleted file mode 100644 index 008c7311..00000000 --- a/crates/flashblocks/src/cache/factory.rs +++ /dev/null @@ -1,77 +0,0 @@ -use crate::cache::{FlashblockStateCache, StateCacheProvider}; - -use alloy_eips::BlockNumberOrTag; -use alloy_primitives::{BlockNumber, B256}; -use reth_primitives_traits::NodePrimitives; -use reth_storage_api::{errors::provider::ProviderResult, StateProviderBox, StateProviderFactory}; - -impl> StateProviderFactory - for FlashblockStateCache -{ - fn latest(&self) -> ProviderResult { - // Determine effective latest: if confirm cache is strictly ahead of the - // provider's best block, use the confirmed block's hash to resolve state - // from the engine tree. Otherwise, use the provider's latest. - let provider_best = self.provider.best_block_number()?; - let inner = self.inner.read(); - if let Some(confirm_height) = inner.confirm_height { - if confirm_height > provider_best { - if let Some(hash) = inner.confirm_cache.hash_for_number(confirm_height) { - drop(inner); - return self.provider.state_by_block_hash(hash); - } - } - } - drop(inner); - self.provider.latest() - } - - fn state_by_block_number_or_tag( - &self, - number_or_tag: BlockNumberOrTag, - ) -> ProviderResult { - match number_or_tag { - BlockNumberOrTag::Latest => self.latest(), - BlockNumberOrTag::Pending => self.pending(), - other => self.provider.state_by_block_number_or_tag(other), - } - } - - fn history_by_block_number(&self, block: BlockNumber) -> ProviderResult { - // If the requested block is in the confirm cache (ahead of canonical), - // resolve via hash so the engine tree can serve it. - if let Some(hash) = self.inner.read().confirm_cache.hash_for_number(block) { - return self.provider.state_by_block_hash(hash); - } - self.provider.history_by_block_number(block) - } - - fn history_by_block_hash(&self, block: B256) -> ProviderResult { - // If the hash is in our confirm cache, route through `state_by_block_hash` - // which also covers the engine tree's in-memory state. - if self.inner.read().confirm_cache.contains_hash(&block) { - return self.provider.state_by_block_hash(block); - } - self.provider.history_by_block_hash(block) - } - - fn state_by_block_hash(&self, block: B256) -> ProviderResult { - self.provider.state_by_block_hash(block) - } - - fn pending(&self) -> ProviderResult { - // Delegate to the underlying provider. The engine tree should have the - // pending block's world state if it has been submitted via engine API. - // Building a custom state overlay from the `PendingSequence`'s - // `ExecutedBlock` is a future enhancement. - self.provider.pending() - } - - fn pending_state_by_hash(&self, block_hash: B256) -> ProviderResult> { - self.provider.pending_state_by_hash(block_hash) - } - - fn maybe_pending(&self) -> ProviderResult> { - self.provider.maybe_pending() - } -} diff --git a/crates/flashblocks/src/cache/header.rs b/crates/flashblocks/src/cache/header.rs deleted file mode 100644 index 86193169..00000000 --- a/crates/flashblocks/src/cache/header.rs +++ /dev/null @@ -1,74 +0,0 @@ -use crate::cache::{FlashblockStateCache, StateCacheProvider}; - -use alloy_primitives::{BlockNumber, B256}; -use core::ops::RangeBounds; -use reth_primitives_traits::{HeaderTy, NodePrimitives, SealedHeader}; -use reth_storage_api::{errors::provider::ProviderResult, HeaderProvider}; - -impl> HeaderProvider - for FlashblockStateCache -{ - type Header = HeaderTy; - - fn header(&self, block_hash: B256) -> ProviderResult> { - if let Some(bar) = self.inner.read().confirm_cache.get_block_by_hash(&block_hash) { - return Ok(Some(bar.block.header().clone())); - } - // Cache miss, delegate to the provider - self.provider.header(block_hash) - } - - fn header_by_number(&self, num: u64) -> ProviderResult> { - if let Some(bar) = self.inner.read().confirm_cache.get_block_by_number(num) { - return Ok(Some(bar.block.header().clone())); - } - // Cache miss, delegate to the provider - self.provider.header_by_number(num) - } - - fn headers_range( - &self, - range: impl RangeBounds, - ) -> ProviderResult> { - let (start, end) = self.resolve_range_bounds(range)?; - if start > end { - return Ok(Vec::new()); - } - self.collect_cached_block_range( - start, - end, - |bar| bar.block.header().clone(), - |r, _| self.provider.headers_range(r), - None, - ) - } - - fn sealed_header( - &self, - number: BlockNumber, - ) -> ProviderResult>> { - if let Some(bar) = self.inner.read().confirm_cache.get_block_by_number(number) { - return Ok(Some(bar.block.sealed_header().clone())); - } - // Cache miss, delegate to the provider - self.provider.sealed_header(number) - } - - fn sealed_headers_while( - &self, - range: impl RangeBounds, - mut predicate: impl FnMut(&SealedHeader) -> bool, - ) -> ProviderResult>> { - let (start, end) = self.resolve_range_bounds(range)?; - if start > end { - return Ok(Vec::new()); - } - self.collect_cached_block_range( - start, - end, - |bar| bar.block.sealed_header().clone(), - |r, pred| self.provider.sealed_headers_while(r, pred), - Some(&mut predicate), - ) - } -} diff --git a/crates/flashblocks/src/cache/id.rs b/crates/flashblocks/src/cache/id.rs deleted file mode 100644 index d2313e1c..00000000 --- a/crates/flashblocks/src/cache/id.rs +++ /dev/null @@ -1,103 +0,0 @@ -use crate::cache::{FlashblockStateCache, StateCacheProvider}; - -use alloy_consensus::BlockHeader; -use alloy_eips::BlockNumHash; -use alloy_primitives::{BlockNumber, B256}; -use reth_chainspec::ChainInfo; -use reth_primitives_traits::NodePrimitives; -use reth_storage_api::{ - errors::provider::ProviderResult, BlockHashReader, BlockIdReader, BlockNumReader, -}; - -impl> BlockHashReader - for FlashblockStateCache -{ - fn block_hash(&self, number: BlockNumber) -> ProviderResult> { - if let Some(hash) = self.inner.read().confirm_cache.hash_for_number(number) { - return Ok(Some(hash)); - } - // Cache miss, delegate to the provider - self.provider.block_hash(number) - } - - fn canonical_hashes_range( - &self, - start: BlockNumber, - end: BlockNumber, - ) -> ProviderResult> { - if start >= end { - // Aligns with underlying blockchain provider - return Ok(Vec::new()); - } - // Provider uses half-open [start, end), convert to inclusive for the helper - self.collect_cached_block_range( - start, - end - 1, - |bar| bar.block.hash(), - |r, _| { - // Convert back to half-open [start, end) for the provider - let end_exclusive = r.end().saturating_add(1); - self.provider.canonical_hashes_range(*r.start(), end_exclusive) - }, - None, - ) - } -} - -impl> BlockNumReader - for FlashblockStateCache -{ - fn chain_info(&self) -> ProviderResult { - let mut info = self.provider.chain_info()?; - let inner = self.inner.read(); - if let Some(h) = inner.confirm_height - && h > info.best_number - && let Some(hash) = inner.confirm_cache.hash_for_number(h) - { - info.best_number = h; - info.best_hash = hash; - } - Ok(info) - } - - fn best_block_number(&self) -> ProviderResult { - let provider_height = self.provider.best_block_number()?; - // If confirm cache is strictly ahead, report that. On tie, prefer provider - Ok(self.inner.read().confirm_height.map_or(provider_height, |h| h.max(provider_height))) - } - - fn last_block_number(&self) -> ProviderResult { - self.provider.last_block_number() - } - - fn block_number(&self, hash: B256) -> ProviderResult> { - if let Some(num) = self.inner.read().confirm_cache.number_for_hash(&hash) { - return Ok(Some(num)); - } - // Cache miss, delegate to the provider - self.provider.block_number(hash) - } -} - -impl> BlockIdReader - for FlashblockStateCache -{ - fn pending_block_num_hash(&self) -> ProviderResult> { - let inner = self.inner.read(); - if let Some(pending) = &inner.pending { - let block = pending.pending.block(); - return Ok(Some(BlockNumHash::new(block.number(), block.hash()))); - } - drop(inner); - // Cache miss, delegate to the provider - self.provider.pending_block_num_hash() - } - - fn safe_block_num_hash(&self) -> ProviderResult> { - self.provider.safe_block_num_hash() - } - - fn finalized_block_num_hash(&self) -> ProviderResult> { - self.provider.finalized_block_num_hash() - } -} diff --git a/crates/flashblocks/src/cache/mod.rs b/crates/flashblocks/src/cache/mod.rs index 85fb43fa..6c0209e1 100644 --- a/crates/flashblocks/src/cache/mod.rs +++ b/crates/flashblocks/src/cache/mod.rs @@ -1,29 +1,19 @@ -mod block; mod confirm; -mod factory; -mod header; -mod id; -mod pending; +pub mod pending; pub(crate) mod raw; -mod receipt; -mod transaction; -mod utils; +pub(crate) mod utils; +pub use confirm::ConfirmCache; +pub use pending::PendingSequence; pub use raw::RawFlashblocksCache; -use confirm::ConfirmCache; -use pending::PendingSequence; -use utils::{block_from_bar, StateCacheProvider}; - -use core::ops::RangeBounds; use parking_lot::RwLock; use std::sync::Arc; +use tracing::*; -use alloy_consensus::BlockHeader; -use alloy_primitives::{BlockNumber, TxHash, B256}; +use alloy_primitives::{TxHash, B256}; use reth_primitives_traits::{NodePrimitives, ReceiptTy}; use reth_rpc_eth_types::block::BlockAndReceipts; -use reth_storage_api::{errors::provider::ProviderResult, BlockNumReader}; /// Cached transaction info (block context, receipt and tx data) for O(1) lookups /// by transaction hash. @@ -43,241 +33,242 @@ pub struct CachedTxInfo { /// Top-level controller state cache for the flashblocks RPC layer. /// -/// Composed of: +/// Pure data store composed of: /// - **Pending**: the in-progress flashblock sequence being built from incoming /// `OpFlashblockPayload` deltas (at most one active sequence at a time). /// - **Confirmed**: completed flashblock sequences that have been committed but /// are still ahead of the canonical chain. /// -/// Implements all reth provider traits using the flashblocks state cache layer -/// as an overlay on top of the underlying chainstate `Provider`. -/// (`BlockReaderIdExt`, `StateProviderFactory`, etc.) -/// -/// **Lookup strategy:** -/// - **Confirmed state** (by hash/number): Check the flashblocks state cache -/// layer first, then fall back to the chainstate provider. -/// - **Latest**: Compare the flashblocks state cache's highest height vs the -/// chainstate provider's best height. Return whichever is higher, on tie we -/// prefer the chainstate provider. -/// - **Pending**: Returns the pending state from the flashblocks state cache. -/// - **All other IDs** (safe, finalized, historical, index-based): delegate -/// directly to the chainstate provider. +/// This cache is a **data source** — it does not wrap a provider or implement +/// any reth provider traits. The RPC override handler decides when to query +/// this cache vs the underlying chainstate provider. /// /// Uses `Arc` for thread safety — a single lock protects all inner /// state, ensuring atomic operations across pending, confirmed, and height /// state (e.g. reorg detection + flush + insert in `handle_confirmed_block`). #[derive(Debug, Clone)] -pub struct FlashblockStateCache { - pub(super) inner: Arc>>, - pub(super) provider: Provider, +pub struct FlashblockStateCache { + inner: Arc>>, } -impl> FlashblockStateCache { +// FlashblockStateCache read interfaces +impl FlashblockStateCache { /// Creates a new [`FlashblockStateCache`]. - pub fn new(provider: Provider) -> eyre::Result { - let canon_height = provider.best_block_number()?; - Ok(Self { - inner: Arc::new(RwLock::new(FlashblockStateCacheInner::new(canon_height))), - provider, - }) + pub fn new() -> Self { + Self { inner: Arc::new(RwLock::new(FlashblockStateCacheInner::new())) } } +} - /// Returns a reference to the underlying chainstate provider. - pub const fn provider(&self) -> &Provider { - &self.provider +// FlashblockStateCache read height interfaces +impl FlashblockStateCache { + /// Returns the current confirmed height. + pub fn get_confirm_height(&self) -> u64 { + self.inner.read().confirm_height } - /// Handles a newly confirmed block by detecting reorgs, flushing invalidated - /// entries, and inserting into the confirm blocks cache. - pub fn handle_confirmed_block( - &self, - block_number: u64, - block_hash: B256, - block: BlockAndReceipts, - ) -> eyre::Result<()> { - self.inner.write().handle_confirmed_block(block_number, block_hash, block) + /// Returns the current pending height. + pub fn get_pending_height(&self) -> u64 { + let inner = self.inner.read(); + inner.pending_cache.as_ref().map_or(inner.confirm_height, |p| p.get_height()) } - /// Handles updating the pending state with a newly executed pending flashblocks - /// sequence. Note that it will replace any existing pending sequence. - pub fn handle_pending_sequence(&self, pending_sequence: PendingSequence) { - self.inner.write().handle_pending_sequence(pending_sequence) + /// Returns the block for the given block number, if cached. + pub fn get_block_by_number(&self, num: u64) -> Option> { + self.inner.read().confirm_cache.get_block_by_number(num) } - pub fn handle_canonical_block(&self, block_number: u64, block_hash: B256) { - self.inner.write().handle_canonical_block(block_number, block_hash) + /// Returns the confirmed block for the given block hash, if cached. + pub fn get_block_by_hash(&self, hash: &B256) -> Option> { + self.inner.read().confirm_cache.get_block_by_hash(hash) } - /// Returns the current confirmed cache height, if any blocks have been confirmed. - pub fn get_confirm_height(&self) -> Option { - self.inner.read().confirm_height + // --- Pending block --- + + /// Returns the current pending block and receipts, if any. + pub fn get_pending_block(&self) -> Option> { + self.inner.read().pending_cache.as_ref().map(|p| p.get_block()) } - /// Returns the current pending height, if any flashblocks have been executed. - pub fn get_pending_height(&self) -> Option { - self.inner.read().pending.as_ref().map(|p| p.pending.block().number()) + // --- Transaction/receipt lookup (pending + confirm) --- + + /// Looks up cached transaction info by hash: pending sequence first, then + /// confirm cache. Returns `None` if the tx is not in either cache layer. + pub fn get_tx_info(&self, tx_hash: &TxHash) -> Option> { + self.inner.read().get_tx_info(tx_hash) } - /// Collects items from an inclusive block number range `[start..=end]`, using - /// the confirm cache as an overlay on top of the provider. - /// - /// Walks backward from `end`, collecting consecutive cache hits via `from_cache`. - /// Delegates the remaining prefix `[start..=provider_end]` to `from_provider`. - /// - /// When `predicate` is `Some`, items are filtered: the provider receives the - /// predicate to stop early, and cached items are checked before appending. - /// When `None`, all items in the range are collected unconditionally. - pub(super) fn collect_cached_block_range( - &self, - start: BlockNumber, - end: BlockNumber, - from_cache: impl Fn(&BlockAndReceipts) -> T, - from_provider: impl FnOnce( - core::ops::RangeInclusive, - &mut dyn FnMut(&T) -> bool, - ) -> ProviderResult>, - predicate: Option<&mut dyn FnMut(&T) -> bool>, - ) -> ProviderResult> { - let inner = self.inner.read(); - let mut cached_bars = Vec::new(); - let mut provider_end = end; - let mut index = end; - loop { - if let Some(bar) = inner.confirm_cache.get_block_by_number(index) { - cached_bars.push(bar); - provider_end = index.saturating_sub(1); - } else { - break; - } - if index == start { - break; - } - index -= 1; - } - cached_bars.reverse(); - drop(inner); + // --- Hash/number mapping --- - let has_provider_range = - provider_end >= start && cached_bars.len() < (end - start + 1) as usize; + /// Returns the block hash for the given block number, if cached in the + /// confirm cache. + pub fn get_block_hash(&self, num: u64) -> Option { + self.inner.read().confirm_cache.hash_for_number(num) + } - let mut always_true = |_: &T| true; - let predicate = predicate.unwrap_or(&mut always_true); + /// Returns the block number for the given block hash, if cached in the + /// confirm cache. + pub fn get_block_number(&self, hash: &B256) -> Option { + self.inner.read().confirm_cache.number_for_hash(hash) + } - let mut predicate_stopped = false; - let mut result = if has_provider_range { - let items = from_provider(start..=provider_end, predicate)?; - let expected = (provider_end - start + 1) as usize; - if items.len() < expected { - predicate_stopped = true; - } - items - } else { - Vec::new() - }; + // --- Range queries (for `eth_getLogs`) --- - if !predicate_stopped { - for bar in &cached_bars { - let item = from_cache(bar); - if !predicate(&item) { - break; - } - result.push(item); + /// Returns all cached confirmed blocks in the inclusive range `[start..=end]`. + /// Blocks not present in the cache are skipped (the caller must fill gaps + /// from the provider). + pub fn get_blocks_in_range(&self, start: u64, end: u64) -> Vec> { + let inner = self.inner.read(); + let mut result = Vec::new(); + for num in start..=end { + if let Some(bar) = inner.confirm_cache.get_block_by_number(num) { + result.push(bar); } } - - Ok(result) + result } +} - /// Resolves an `impl RangeBounds` into an inclusive `(start, end)` pair. - /// Matches reth's blockchain provider's convert_range_bounds semantics, and unbounded - /// ends are resolved to `best_block_number`. - pub(super) fn resolve_range_bounds( +// FlashblockStateCache state mutation interfaces. +impl FlashblockStateCache { + /// Handles updating the latest pending state by the flashblocks rpc handle. + /// + /// This method detects when the flashblocks sequencer has advanced to the next + /// pending sequence height, and optimistically commits the current pending + /// sequence to the confirm cache before advancing the pending tip. + /// + /// If the pending sequence to be updated is the same as the current pending + /// sequence, it will replace the existing with the incoming pending sequence. + /// + /// Note that this state update is fallible as it detects potential reorgs, and + /// triggers cache flush on invalidate entries. An entry is invalidated if the + /// incoming pending sequence height is not the next pending height or current + /// pending height. + pub fn handle_pending_sequence( &self, - range: impl RangeBounds, - ) -> ProviderResult<(BlockNumber, BlockNumber)> { - let start = match range.start_bound() { - core::ops::Bound::Included(&n) => n, - core::ops::Bound::Excluded(&n) => n + 1, - core::ops::Bound::Unbounded => 0, - }; - let end = match range.end_bound() { - core::ops::Bound::Included(&n) => n, - core::ops::Bound::Excluded(&n) => n - 1, - core::ops::Bound::Unbounded => self.best_block_number()?, - }; - Ok((start, end)) + pending_sequence: PendingSequence, + ) -> eyre::Result<()> { + self.inner.write().handle_pending_sequence(pending_sequence) + } + + /// Handles a canonical block commit by flushing stale confirmed entries and + /// the pending state if it matches the committed block. + /// + /// If reorg flag is set, the flashblocks state cache will be default be flushed. + pub fn handle_canonical_block(&self, block_number: u64, reorg: bool) { + self.inner.write().handle_canonical_block(block_number, reorg) } } /// Inner state of the flashblocks state cache. #[derive(Debug)] -pub(super) struct FlashblockStateCacheInner { +struct FlashblockStateCacheInner { /// The current in-progress pending flashblock sequence, if any. - pub(super) pending: Option>, + pending_cache: Option>, /// Cache of confirmed flashblock sequences ahead of the canonical chain. - pub(super) confirm_cache: ConfirmCache, - /// The highest confirmed block height. - pub(super) confirm_height: Option, - /// The highest canonical block height. - pub(super) canon_height: u64, + confirm_cache: ConfirmCache, + /// The highest confirmed block height of from both the confirm cache or + /// the pending cache. + confirm_height: u64, } impl FlashblockStateCacheInner { - fn new(canon_height: u64) -> Self { - Self { - pending: None, - confirm_cache: ConfirmCache::new(), - confirm_height: None, - canon_height, - } + fn new() -> Self { + Self { pending_cache: None, confirm_cache: ConfirmCache::new(), confirm_height: 0 } + } + + /// Looks up cached transaction info by hash: pending sequence first, then + /// confirm cache. Returns `None` if the tx is not in either cache layer. + fn get_tx_info(&self, tx_hash: &TxHash) -> Option> { + self.pending_cache + .as_ref() + .and_then(|p| p.get_tx_info(tx_hash)) + .or_else(|| self.confirm_cache.get_tx_info(tx_hash)) } - /// Handles a newly confirmed block with reorg detection. + /// Handles flushing a newly confirmed block to the confirm cache. Note that + /// this state update is fallible as it detects potential reorgs, and triggers + /// cache flush on invalidate entries. + /// + /// An entry is invalidated if: + /// 1. Block height to be is lower than the cache's confirmed height + /// 2. Block height to be is not the next confirm block height fn handle_confirmed_block( &mut self, block_number: u64, - block_hash: B256, block: BlockAndReceipts, ) -> eyre::Result<()> { - // Validation checks - if let Some(confirm_height) = self.confirm_height { - if block_number <= confirm_height { - // Reorg detected - confirm cache is polluted - return Err(eyre::eyre!( - "polluted state cache - trying to commit lower confirm height block" - )); - } - if block_number != confirm_height + 1 { - return Err(eyre::eyre!( - "polluted state cache - not next consecutive confirm height block" - )); - } + if block_number <= self.confirm_height { + return Err(eyre::eyre!( + "polluted state cache - trying to commit lower confirm height block" + )); + } + if block_number != self.confirm_height + 1 { + return Err(eyre::eyre!( + "polluted state cache - not next consecutive confirm height block" + )); } - // Commit new confirmed block to state cache - self.confirm_height = Some(block_number); - self.confirm_cache.insert(block_number, block_hash, block)?; + self.confirm_height = block_number; + self.confirm_cache.insert(block_number, block)?; Ok(()) } - /// Looks up cached transaction info by hash: pending sequence first, then - /// confirm cache. Returns `None` if the tx is not in either cache layer. - pub(super) fn get_tx_info(&self, tx_hash: &TxHash) -> Option> { - self.pending - .as_ref() - .and_then(|p| p.get_tx_info(tx_hash)) - .or_else(|| self.confirm_cache.get_tx_info(tx_hash)) - } + fn handle_pending_sequence( + &mut self, + pending_sequence: PendingSequence, + ) -> eyre::Result<()> { + let pending_height = pending_sequence.get_height(); + let expected_height = self.confirm_height + 1; - fn handle_pending_sequence(&mut self, pending_sequence: PendingSequence) { - self.pending = Some(pending_sequence); + if pending_height == expected_height + 1 { + // Pending tip has advanced — update pending state, and optimistically + // commit current pending to confirm cache + let sequence = self.pending_cache.take().ok_or_else(|| { + eyre::eyre!( + "polluted state cache - trying to advance pending tip but no current pending" + ) + })?; + self.handle_confirmed_block(expected_height, sequence.get_block())?; + self.pending_cache = Some(pending_sequence); + } else if pending_height == expected_height { + // Replace the existing pending sequence + self.pending_cache = Some(pending_sequence); + } else { + return Err(eyre::eyre!( + "polluted state cache - not next consecutive pending height block" + )); + } + Ok(()) } - fn handle_canonical_block(&mut self, block_number: u64, block_hash: B256) { - self.canon_height = block_number; - self.confirm_cache.flush_up_to(block_number); - if self.pending.as_ref().and_then(|p| p.block_hash) == Some(block_hash) { - self.pending = None; + fn handle_canonical_block(&mut self, canon_height: u64, reorg: bool) { + let pending_stale = + self.pending_cache.as_ref().is_some_and(|p| p.get_height() <= canon_height); + + if pending_stale || reorg { + warn!( + target: "flashblocks", + canonical_height = canon_height, + cache_height = self.confirm_height, + reorg, + "Flushing flashblocks state cache", + ); + self.flush(); + } else { + debug!( + target: "flashblocks", + canonical_height = canon_height, + cache_height = self.confirm_height, + "Flashblocks state cache received canonical block, flushing confirm cache up to canonical height" + ); + self.confirm_cache.flush_up_to(canon_height); } + self.confirm_height = self.confirm_height.max(canon_height); + } + + fn flush(&mut self) { + self.confirm_height = 0; + self.pending_cache = None; + self.confirm_cache.clear(); } } diff --git a/crates/flashblocks/src/cache/pending.rs b/crates/flashblocks/src/cache/pending.rs index 20d69b96..7c87fb65 100644 --- a/crates/flashblocks/src/cache/pending.rs +++ b/crates/flashblocks/src/cache/pending.rs @@ -6,7 +6,7 @@ use alloy_consensus::BlockHeader; use alloy_primitives::{TxHash, B256}; use reth_primitives_traits::NodePrimitives; use reth_revm::cached::CachedReads; -use reth_rpc_eth_types::PendingBlock; +use reth_rpc_eth_types::{block::BlockAndReceipts, PendingBlock}; /// The pending flashblocks sequence built with all received OpFlashblockPayload /// alongside the metadata for the last added flashblock. @@ -62,6 +62,14 @@ impl PendingSequence { self.has_computed_state_root = true; } + pub fn get_height(&self) -> u64 { + self.pending.block().number() + } + + pub fn get_block(&self) -> BlockAndReceipts { + self.pending.to_block_and_receipts() + } + /// Returns the cached transaction info for the given tx hash, if present /// in the pending sequence. pub fn get_tx_info(&self, tx_hash: &TxHash) -> Option> { diff --git a/crates/flashblocks/src/cache/receipt.rs b/crates/flashblocks/src/cache/receipt.rs deleted file mode 100644 index 1a29297f..00000000 --- a/crates/flashblocks/src/cache/receipt.rs +++ /dev/null @@ -1,67 +0,0 @@ -use crate::cache::{FlashblockStateCache, StateCacheProvider}; - -use alloy_eips::BlockHashOrNumber; -use alloy_primitives::{BlockNumber, TxHash, TxNumber}; -use core::ops::{RangeBounds, RangeInclusive}; -use reth_primitives_traits::{NodePrimitives, ReceiptTy}; -use reth_storage_api::{errors::provider::ProviderResult, ReceiptProvider, ReceiptProviderIdExt}; - -impl> ReceiptProvider - for FlashblockStateCache -{ - type Receipt = ReceiptTy; - - fn receipt(&self, id: TxNumber) -> ProviderResult> { - self.provider.receipt(id) - } - - fn receipt_by_hash(&self, hash: TxHash) -> ProviderResult> { - if let Some(info) = self.inner.read().get_tx_info(&hash) { - return Ok(Some(info.receipt.clone())); - } - self.provider.receipt_by_hash(hash) - } - - fn receipts_by_block( - &self, - block: BlockHashOrNumber, - ) -> ProviderResult>> { - let cached = match block { - BlockHashOrNumber::Hash(hash) => { - self.inner.read().confirm_cache.get_block_by_hash(&hash) - } - BlockHashOrNumber::Number(num) => { - self.inner.read().confirm_cache.get_block_by_number(num) - } - }; - if let Some(bar) = cached { - return Ok(Some((*bar.receipts).clone())); - } - self.provider.receipts_by_block(block) - } - - fn receipts_by_tx_range( - &self, - range: impl RangeBounds, - ) -> ProviderResult> { - self.provider.receipts_by_tx_range(range) - } - - fn receipts_by_block_range( - &self, - block_range: RangeInclusive, - ) -> ProviderResult>> { - self.collect_cached_block_range( - *block_range.start(), - *block_range.end(), - |bar| (*bar.receipts).clone(), - |r, _| self.provider.receipts_by_block_range(r), - None, - ) - } -} - -impl> ReceiptProviderIdExt - for FlashblockStateCache -{ -} diff --git a/crates/flashblocks/src/cache/transaction.rs b/crates/flashblocks/src/cache/transaction.rs deleted file mode 100644 index c50405f0..00000000 --- a/crates/flashblocks/src/cache/transaction.rs +++ /dev/null @@ -1,126 +0,0 @@ -use crate::cache::{FlashblockStateCache, StateCacheProvider}; - -use alloy_consensus::{transaction::TxHashRef, BlockHeader}; -use alloy_eips::BlockHashOrNumber; -use alloy_primitives::{Address, BlockNumber, TxHash, TxNumber}; -use core::ops::RangeBounds; -use reth_primitives_traits::{BlockBody, NodePrimitives, TransactionMeta}; -use reth_storage_api::{errors::provider::ProviderResult, TransactionsProvider}; - -impl> TransactionsProvider - for FlashblockStateCache -{ - type Transaction = N::SignedTx; - - fn transaction_id(&self, tx_hash: TxHash) -> ProviderResult> { - self.provider.transaction_id(tx_hash) - } - - fn transaction_by_id(&self, id: TxNumber) -> ProviderResult> { - self.provider.transaction_by_id(id) - } - - fn transaction_by_id_unhashed( - &self, - id: TxNumber, - ) -> ProviderResult> { - self.provider.transaction_by_id_unhashed(id) - } - - fn transaction_by_hash(&self, hash: TxHash) -> ProviderResult> { - if let Some(info) = self.inner.read().get_tx_info(&hash) { - return Ok(Some(info.tx.clone())); - } - self.provider.transaction_by_hash(hash) - } - - fn transaction_by_hash_with_meta( - &self, - hash: TxHash, - ) -> ProviderResult> { - let inner = self.inner.read(); - if let Some(info) = inner.get_tx_info(&hash) { - // Resolve block header fields: try confirm cache first, then pending block - let (base_fee, excess_blob_gas, timestamp) = inner - .confirm_cache - .get_block_by_number(info.block_number) - .map(|b| { - let h = b.block.header(); - (h.base_fee_per_gas(), h.excess_blob_gas(), h.timestamp()) - }) - .or_else(|| { - inner.pending.as_ref().map(|p| { - let h = p.pending.block().header(); - (h.base_fee_per_gas(), h.excess_blob_gas(), h.timestamp()) - }) - }) - .unwrap_or_default(); - - let meta = TransactionMeta { - tx_hash: *info.tx.tx_hash(), - index: info.tx_index, - block_hash: info.block_hash, - block_number: info.block_number, - base_fee, - excess_blob_gas, - timestamp, - }; - return Ok(Some((info.tx.clone(), meta))); - } - drop(inner); - self.provider.transaction_by_hash_with_meta(hash) - } - - fn transactions_by_block( - &self, - block: BlockHashOrNumber, - ) -> ProviderResult>> { - let cached = match block { - BlockHashOrNumber::Hash(hash) => { - self.inner.read().confirm_cache.get_block_by_hash(&hash) - } - BlockHashOrNumber::Number(num) => { - self.inner.read().confirm_cache.get_block_by_number(num) - } - }; - if let Some(bar) = cached { - return Ok(Some(bar.block.body().transactions().to_vec())); - } - self.provider.transactions_by_block(block) - } - - fn transactions_by_block_range( - &self, - range: impl RangeBounds, - ) -> ProviderResult>> { - let (start, end) = self.resolve_range_bounds(range)?; - if start > end { - return Ok(Vec::new()); - } - self.collect_cached_block_range( - start, - end, - |bar| bar.block.body().transactions().to_vec(), - |r, _| self.provider.transactions_by_block_range(r), - None, - ) - } - - fn transactions_by_tx_range( - &self, - range: impl RangeBounds, - ) -> ProviderResult> { - self.provider.transactions_by_tx_range(range) - } - - fn senders_by_tx_range( - &self, - range: impl RangeBounds, - ) -> ProviderResult> { - self.provider.senders_by_tx_range(range) - } - - fn transaction_sender(&self, id: TxNumber) -> ProviderResult> { - self.provider.transaction_sender(id) - } -} diff --git a/crates/flashblocks/src/cache/utils.rs b/crates/flashblocks/src/cache/utils.rs index aa0eefcb..6cd5f8c7 100644 --- a/crates/flashblocks/src/cache/utils.rs +++ b/crates/flashblocks/src/cache/utils.rs @@ -1,33 +1,5 @@ -use reth_primitives_traits::{Block, BlockTy, HeaderTy, NodePrimitives, ReceiptTy}; +use reth_primitives_traits::{Block, BlockTy, NodePrimitives}; use reth_rpc_eth_types::block::BlockAndReceipts; -use reth_storage_api::{BlockReaderIdExt, StateProviderFactory}; - -/// Provider trait bound alias used throughout the `FlashblockStateCache` implementation. -/// -/// The provider must implement the full reth block reader + state provider stack. -pub(crate) trait StateCacheProvider: - StateProviderFactory - + BlockReaderIdExt< - Header = HeaderTy, - Block = BlockTy, - Transaction = N::SignedTx, - Receipt = ReceiptTy, - > + Unpin -{ -} - -impl StateCacheProvider for P -where - N: NodePrimitives, - P: StateProviderFactory - + BlockReaderIdExt< - Header = HeaderTy, - Block = BlockTy, - Transaction = N::SignedTx, - Receipt = ReceiptTy, - > + Unpin, -{ -} pub(crate) fn block_from_bar(bar: &BlockAndReceipts) -> BlockTy { BlockTy::::new(bar.block.header().clone(), bar.block.body().clone()) diff --git a/crates/rpc/Cargo.toml b/crates/rpc/Cargo.toml index 311ac95b..135ccd20 100644 --- a/crates/rpc/Cargo.toml +++ b/crates/rpc/Cargo.toml @@ -11,16 +11,35 @@ repository.workspace = true default = [] [dependencies] +xlayer-flashblocks.workspace = true + +# reth reth-optimism-rpc.workspace = true +reth-optimism-primitives.workspace = true +reth-primitives-traits.workspace = true reth-rpc.workspace = true +reth-rpc-convert.workspace = true reth-rpc-eth-api.workspace = true +reth-rpc-eth-types.workspace = true + +# alloy +alloy-consensus.workspace = true +alloy-eips.workspace = true +alloy-primitives.workspace = true +alloy-rpc-types-eth.workspace = true + +# op +op-alloy-network.workspace = true +# rpc +async-trait.workspace = true jsonrpsee.workspace = true -serde.workspace = true + +# misc +tracing.workspace = true [dev-dependencies] tokio = { workspace = true, features = ["rt", "macros"] } [lints] workspace = true - diff --git a/crates/rpc/src/eth.rs b/crates/rpc/src/eth.rs new file mode 100644 index 00000000..80fed97d --- /dev/null +++ b/crates/rpc/src/eth.rs @@ -0,0 +1,796 @@ +//! Eth API override module for flashblocks RPC. +//! +//! Provides `EthApiOverride` — a jsonrpsee `#[rpc]` trait that overrides a +//! subset of `eth_*` methods to serve flashblocks data from the +//! [`FlashblockStateCache`] alongside canonical chain data from the inner +//! `Eth` API. +//! +//! Also provides `XlayerRpcExtApi` — a separate `#[rpc]` trait that exposes +//! X Layer-specific methods like `eth_flashblocksEnabled`. +//! +//! The override handler checks the flashblocks cache first for confirmed and +//! pending blocks, then falls back to the canonical `eth_api` for all other +//! queries. For transaction/receipt lookups, canonical is checked **first** to +//! avoid a race condition where the cache hasn't been cleared yet after a +//! canonical block commit. + +use alloy_consensus::BlockHeader; +use alloy_eips::{BlockId, BlockNumberOrTag}; +use alloy_primitives::{Address, Bytes, TxHash, U256}; +use alloy_rpc_types_eth::{ + state::{EvmOverrides, StateOverride}, + BlockOverrides, Filter, Log, TransactionInfo, +}; +use jsonrpsee::{ + core::{async_trait, RpcResult}, + proc_macros::rpc, +}; +use op_alloy_network::Optimism; +use reth_optimism_primitives::OpPrimitives; +use reth_primitives_traits::{BlockBody, NodePrimitives, SignerRecoverable}; +use reth_rpc::eth::EthFilter; +use reth_rpc_convert::{RpcConvert, RpcTransaction}; +use reth_rpc_eth_api::{ + helpers::{EthBlocks, EthCall, EthState, EthTransactions, FullEthApi}, + EthApiTypes, EthFilterApiServer, RpcBlock, RpcReceipt, +}; +use tracing::debug; +use xlayer_flashblocks::cache::FlashblockStateCache; + +// --------------------------------------------------------------------------- +// EthApiOverride — flashblocks `eth_*` method overrides +// --------------------------------------------------------------------------- + +/// Eth API override trait for flashblocks integration. +/// +/// Methods in this trait override the default `eth_*` JSON-RPC namespace +/// handlers when flashblocks are active. They are registered via +/// `add_or_replace_if_module_configured` to replace the corresponding +/// default implementations. +#[cfg_attr(not(test), rpc(server, namespace = "eth"))] +#[cfg_attr(test, rpc(server, client, namespace = "eth"))] +pub trait EthApiOverride { + // --- Block queries --- + + /// Returns the current block number, accounting for confirmed flashblocks. + #[method(name = "blockNumber")] + async fn block_number(&self) -> RpcResult; + + /// Returns a block by number, with flashblocks support for pending/confirmed. + #[method(name = "getBlockByNumber")] + async fn get_block_by_number( + &self, + number: BlockNumberOrTag, + full: bool, + ) -> RpcResult>>; + + /// Returns a block by hash, checking flashblocks confirm cache first. + #[method(name = "getBlockByHash")] + async fn get_block_by_hash( + &self, + hash: alloy_primitives::B256, + full: bool, + ) -> RpcResult>>; + + /// Returns the transaction count for a block by number. + #[method(name = "getBlockTransactionCountByNumber")] + async fn get_block_transaction_count_by_number( + &self, + number: BlockNumberOrTag, + ) -> RpcResult>; + + /// Returns the transaction count for a block by hash. + #[method(name = "getBlockTransactionCountByHash")] + async fn get_block_transaction_count_by_hash( + &self, + hash: alloy_primitives::B256, + ) -> RpcResult>; + + /// Returns all receipts for a block. + #[method(name = "getBlockReceipts")] + async fn get_block_receipts( + &self, + block_id: BlockNumberOrTag, + ) -> RpcResult>>>; + + // --- Transaction queries --- + + /// Returns a transaction by hash (canonical-first to avoid race conditions). + #[method(name = "getTransactionByHash")] + async fn get_transaction_by_hash( + &self, + hash: TxHash, + ) -> RpcResult>>; + + /// Returns a transaction by block hash and index. + #[method(name = "getTransactionByBlockHashAndIndex")] + async fn get_transaction_by_block_hash_and_index( + &self, + block_hash: alloy_primitives::B256, + index: alloy_eips::BlockNumberOrTag, + ) -> RpcResult>>; + + /// Returns a transaction by block number and index. + #[method(name = "getTransactionByBlockNumberAndIndex")] + async fn get_transaction_by_block_number_and_index( + &self, + block_number: BlockNumberOrTag, + index: alloy_eips::BlockNumberOrTag, + ) -> RpcResult>>; + + /// Returns a transaction receipt (canonical-first to avoid race conditions). + #[method(name = "getTransactionReceipt")] + async fn get_transaction_receipt( + &self, + hash: TxHash, + ) -> RpcResult>>; + + // --- State queries --- + + /// Returns account balance, with flashblocks support for pending state. + #[method(name = "getBalance")] + async fn get_balance(&self, address: Address, block_number: Option) + -> RpcResult; + + /// Returns the transaction count (nonce) for an address. + #[method(name = "getTransactionCount")] + async fn get_transaction_count( + &self, + address: Address, + block_number: Option, + ) -> RpcResult; + + /// Returns the code at a given address. + #[method(name = "getCode")] + async fn get_code(&self, address: Address, block_number: Option) -> RpcResult; + + /// Returns the storage value at a given address and slot. + #[method(name = "getStorageAt")] + async fn get_storage_at( + &self, + address: Address, + slot: U256, + block_number: Option, + ) -> RpcResult; + + /// Executes a call with flashblock state support. + #[method(name = "call")] + async fn call( + &self, + transaction: alloy_rpc_types_eth::TransactionRequest, + block_number: Option, + state_overrides: Option, + block_overrides: Option>, + ) -> RpcResult; + + /// Estimates gas with flashblock state support. + #[method(name = "estimateGas")] + async fn estimate_gas( + &self, + transaction: alloy_rpc_types_eth::TransactionRequest, + block_number: Option, + overrides: Option, + ) -> RpcResult; + + // --- Logs --- + + /// Returns logs matching the filter, including pending flashblock logs. + #[method(name = "getLogs")] + async fn get_logs(&self, filter: Filter) -> RpcResult>; +} + +/// Extended Eth API with flashblocks cache overlay. +/// +/// Wraps the canonical `eth_api` and `eth_filter` alongside a +/// [`FlashblockStateCache`] to serve flashblocks data for confirmed and +/// pending blocks while delegating canonical chain queries to the underlying +/// `Eth` API. +#[derive(Debug)] +pub struct XLayerEthApiExt { + eth_api: Eth, + eth_filter: EthFilter, + flash_cache: FlashblockStateCache, +} + +impl XLayerEthApiExt { + /// Creates a new [`XLayerEthApiExt`]. + pub fn new( + eth_api: Eth, + eth_filter: EthFilter, + flash_cache: FlashblockStateCache, + ) -> Self { + Self { eth_api, eth_filter, flash_cache } + } +} + +#[async_trait] +impl EthApiOverrideServer for XLayerEthApiExt +where + Eth: FullEthApi + Send + Sync + 'static, + jsonrpsee_types::error::ErrorObject<'static>: From, +{ + async fn block_number(&self) -> RpcResult { + // The cache's confirm height is always >= canonical height (it tracks + // the max of confirm cache tip and canonical tip). Use the cache's + // pending height (which accounts for the in-progress flashblock + // sequence) as the reported block number. + let height = self.flash_cache.get_pending_height(); + Ok(U256::from(height)) + } + + async fn get_block_by_number( + &self, + number: BlockNumberOrTag, + full: bool, + ) -> RpcResult>> { + debug!(target: "xlayer::rpc", ?number, "eth_getBlockByNumber"); + + if number.is_pending() { + // Return pending flashblock if available + if let Some(bar) = self.flash_cache.get_pending_block() { + return bar_to_rpc_block::(&bar, full, self.eth_api.converter()) + .map(Some) + .map_err(Into::into); + } + // No pending flashblock — treat as latest + return EthBlocks::rpc_block(&self.eth_api, BlockNumberOrTag::Latest.into(), full) + .await + .map_err(Into::into); + } + + // Check confirm cache for specific block numbers + if let BlockNumberOrTag::Number(num) = number { + if let Some(bar) = self.flash_cache.get_block_by_number(num) { + return bar_to_rpc_block::(&bar, full, self.eth_api.converter()) + .map(Some) + .map_err(Into::into); + } + } + + // Delegate to canonical + EthBlocks::rpc_block(&self.eth_api, number.into(), full).await.map_err(Into::into) + } + + async fn get_block_by_hash( + &self, + hash: alloy_primitives::B256, + full: bool, + ) -> RpcResult>> { + debug!(target: "xlayer::rpc", %hash, "eth_getBlockByHash"); + + // Check confirm cache first + if let Some(bar) = self.flash_cache.get_block_by_hash(&hash) { + return bar_to_rpc_block::(&bar, full, self.eth_api.converter()) + .map(Some) + .map_err(Into::into); + } + + // Delegate to canonical + EthBlocks::rpc_block(&self.eth_api, hash.into(), full).await.map_err(Into::into) + } + + async fn get_block_transaction_count_by_number( + &self, + number: BlockNumberOrTag, + ) -> RpcResult> { + debug!(target: "xlayer::rpc", ?number, "eth_getBlockTransactionCountByNumber"); + + if number.is_pending() { + if let Some(bar) = self.flash_cache.get_pending_block() { + let count = bar.block.body().transaction_count(); + return Ok(Some(U256::from(count))); + } + return EthBlocks::block_transaction_count( + &self.eth_api, + BlockNumberOrTag::Latest.into(), + ) + .await + .map(|opt| opt.map(U256::from)) + .map_err(Into::into); + } + + if let BlockNumberOrTag::Number(num) = number { + if let Some(bar) = self.flash_cache.get_block_by_number(num) { + let count = bar.block.body().transaction_count(); + return Ok(Some(U256::from(count))); + } + } + + EthBlocks::block_transaction_count(&self.eth_api, number.into()) + .await + .map(|opt| opt.map(U256::from)) + .map_err(Into::into) + } + + async fn get_block_transaction_count_by_hash( + &self, + hash: alloy_primitives::B256, + ) -> RpcResult> { + debug!(target: "xlayer::rpc", %hash, "eth_getBlockTransactionCountByHash"); + + if let Some(bar) = self.flash_cache.get_block_by_hash(&hash) { + let count = bar.block.body().transaction_count(); + return Ok(Some(U256::from(count))); + } + + EthBlocks::block_transaction_count(&self.eth_api, hash.into()) + .await + .map(|opt| opt.map(U256::from)) + .map_err(Into::into) + } + + async fn get_block_receipts( + &self, + block_id: BlockNumberOrTag, + ) -> RpcResult>>> { + debug!(target: "xlayer::rpc", ?block_id, "eth_getBlockReceipts"); + + let bar = if block_id.is_pending() { + self.flash_cache.get_pending_block() + } else if let BlockNumberOrTag::Number(num) = block_id { + self.flash_cache.get_block_by_number(num) + } else { + None + }; + + if let Some(bar) = bar { + let receipts = + bar_to_rpc_receipts::(&bar, self.eth_api.converter()).map_err(Into::into)?; + return Ok(Some(receipts)); + } + + // Delegate to canonical — use the block_receipts helper from the eth_api + // For now, delegate to the canonical handler directly + // TODO: Once reth exposes a direct block_receipts helper, use it + Ok(None) + } + + async fn get_transaction_by_hash( + &self, + hash: TxHash, + ) -> RpcResult>> { + debug!(target: "xlayer::rpc", %hash, "eth_getTransactionByHash"); + + // Check canonical chain FIRST to avoid race condition where flashblocks + // cache hasn't been cleared yet after canonical block commit + if let Some(tx_source) = EthTransactions::transaction_by_hash(&self.eth_api, hash).await? { + let rpc_tx = + tx_source.into_transaction(self.eth_api.converter()).map_err(Into::into)?; + return Ok(Some(rpc_tx)); + } + + // Fall back to flashblocks cache + if let Some(info) = self.flash_cache.get_tx_info(&hash) { + let tx_info = TransactionInfo { + hash: Some(hash), + index: Some(info.tx_index), + block_hash: Some(info.block_hash), + block_number: Some(info.block_number), + base_fee: None, + }; + let recovered = reth_primitives_traits::Recovered::new_unchecked( + info.tx.clone(), + info.tx.recover_signer().unwrap_or_default(), + ); + let rpc_tx = self.eth_api.converter().fill(recovered, tx_info).map_err(Into::into)?; + return Ok(Some(rpc_tx)); + } + + Ok(None) + } + + async fn get_transaction_by_block_hash_and_index( + &self, + block_hash: alloy_primitives::B256, + index: alloy_eips::BlockNumberOrTag, + ) -> RpcResult>> { + debug!(target: "xlayer::rpc", %block_hash, ?index, "eth_getTransactionByBlockHashAndIndex"); + + let tx_index = match index { + BlockNumberOrTag::Number(n) => n, + _ => return Ok(None), + }; + + if let Some(bar) = self.flash_cache.get_block_by_hash(&block_hash) { + return get_tx_by_index_from_bar::(&bar, tx_index, self.eth_api.converter()) + .map_err(Into::into); + } + + // Delegate to canonical + // The canonical eth_api doesn't expose this directly in a helper trait, + // so we just return None for non-cached blocks and let the main handler + // deal with it. The override will be registered with add_or_replace, so + // this only gets called for our override. + Ok(None) + } + + async fn get_transaction_by_block_number_and_index( + &self, + block_number: BlockNumberOrTag, + index: alloy_eips::BlockNumberOrTag, + ) -> RpcResult>> { + debug!(target: "xlayer::rpc", ?block_number, ?index, "eth_getTransactionByBlockNumberAndIndex"); + + let tx_index = match index { + BlockNumberOrTag::Number(n) => n, + _ => return Ok(None), + }; + + let bar = if block_number.is_pending() { + self.flash_cache.get_pending_block() + } else if let BlockNumberOrTag::Number(num) = block_number { + self.flash_cache.get_block_by_number(num) + } else { + None + }; + + if let Some(bar) = bar { + return get_tx_by_index_from_bar::(&bar, tx_index, self.eth_api.converter()) + .map_err(Into::into); + } + + Ok(None) + } + + async fn get_transaction_receipt( + &self, + hash: TxHash, + ) -> RpcResult>> { + debug!(target: "xlayer::rpc", %hash, "eth_getTransactionReceipt"); + + // Check canonical chain FIRST to avoid race condition + if let Some(canonical_receipt) = + EthTransactions::transaction_receipt(&self.eth_api, hash).await? + { + return Ok(Some(canonical_receipt)); + } + + // Fall back to flashblocks cache + if let Some(info) = self.flash_cache.get_tx_info(&hash) { + let receipt = cached_tx_info_to_rpc_receipt::(&info, self.eth_api.converter()) + .map_err(Into::into)?; + return Ok(Some(receipt)); + } + + Ok(None) + } + + // --- State queries (Phase 1: delegate to eth_api) --- + + async fn get_balance( + &self, + address: Address, + block_number: Option, + ) -> RpcResult { + // Phase 1: delegate entirely to eth_api + // Phase 2 will add pending state override from flashblocks cache + EthState::balance(&self.eth_api, address, block_number).await.map_err(Into::into) + } + + async fn get_transaction_count( + &self, + address: Address, + block_number: Option, + ) -> RpcResult { + EthState::transaction_count(&self.eth_api, address, block_number).await.map_err(Into::into) + } + + async fn get_code(&self, address: Address, block_number: Option) -> RpcResult { + EthState::get_code(&self.eth_api, address, block_number).await.map_err(Into::into) + } + + async fn get_storage_at( + &self, + address: Address, + slot: U256, + block_number: Option, + ) -> RpcResult { + EthState::storage_at( + &self.eth_api, + address, + alloy_rpc_types_eth::JsonStorageKey(slot.into()), + block_number, + ) + .await + .map_err(Into::into) + } + + async fn call( + &self, + transaction: alloy_rpc_types_eth::TransactionRequest, + block_number: Option, + state_overrides: Option, + block_overrides: Option>, + ) -> RpcResult { + // Phase 1: delegate entirely to eth_api + // Phase 2 will merge flashblocks state overrides for pending + EthCall::call( + &self.eth_api, + transaction, + block_number, + EvmOverrides::new(state_overrides, block_overrides), + ) + .await + .map_err(Into::into) + } + + async fn estimate_gas( + &self, + transaction: alloy_rpc_types_eth::TransactionRequest, + block_number: Option, + overrides: Option, + ) -> RpcResult { + // Phase 1: delegate entirely to eth_api + let block_id = block_number.unwrap_or_default(); + EthCall::estimate_gas_at(&self.eth_api, transaction, block_id, overrides) + .await + .map_err(Into::into) + } + + async fn get_logs(&self, filter: Filter) -> RpcResult> { + debug!(target: "xlayer::rpc", ?filter.address, "eth_getLogs"); + + // Check if this is a range query with pending toBlock + let (from_block, to_block) = match &filter.block_option { + alloy_rpc_types_eth::FilterBlockOption::Range { from_block, to_block } => { + (*from_block, *to_block) + } + _ => { + // Block hash queries or other formats — delegate to eth filter + return self.eth_filter.logs(filter).await; + } + }; + + // If toBlock is not pending, delegate to eth filter + if !matches!(to_block, Some(BlockNumberOrTag::Pending)) { + return self.eth_filter.logs(filter).await; + } + + // Mixed query: toBlock is pending — combine historical + pending logs + let mut all_logs = Vec::new(); + + // Get historical logs if fromBlock is not pending + if !matches!(from_block, Some(BlockNumberOrTag::Pending)) { + let mut historical_filter = filter.clone(); + historical_filter.block_option = alloy_rpc_types_eth::FilterBlockOption::Range { + from_block, + to_block: Some(BlockNumberOrTag::Latest), + }; + let historical_logs: Vec = self.eth_filter.logs(historical_filter).await?; + all_logs.extend(historical_logs); + } + + // Get pending logs from flashblocks cache + if let Some(pending_bar) = self.flash_cache.get_pending_block() { + let pending_logs = extract_logs_from_bar(&pending_bar, &filter); + // Dedup: skip logs already fetched in historical range + let historical_max_block = all_logs.last().and_then(|l| l.block_number); + for log in pending_logs { + if let Some(max_block) = historical_max_block { + if log.block_number.is_some_and(|n| n <= max_block) { + continue; + } + } + all_logs.push(log); + } + } + + Ok(all_logs) + } +} + +// --------------------------------------------------------------------------- +// Helper functions +// --------------------------------------------------------------------------- + +/// Converts a [`BlockAndReceipts`] into an RPC block. +fn bar_to_rpc_block>( + bar: &reth_rpc_eth_types::block::BlockAndReceipts, + full: bool, + converter: &Eth::RpcConvert, +) -> Result, Eth::Error> +where + Eth::Error: From<::Error>, +{ + bar.block + .clone_into_rpc_block( + full.into(), + |tx, tx_info| converter.fill(tx, tx_info), + |header, size| converter.convert_header(header, size), + ) + .map_err(Into::into) +} + +/// Converts all receipts from a [`BlockAndReceipts`] into RPC receipts. +fn bar_to_rpc_receipts>( + bar: &reth_rpc_eth_types::block::BlockAndReceipts, + converter: &Eth::RpcConvert, +) -> Result>, Eth::Error> +where + Eth::Error: From<::Error>, +{ + use alloy_consensus::transaction::TxHashRef; + use reth_rpc_convert::transaction::ConvertReceiptInput; + + let block_hash = bar.block.hash(); + let block_number = bar.block.number(); + + let txs = bar.block.body().transactions(); + let receipts = bar.receipts.as_ref(); + + let mut inputs = Vec::with_capacity(txs.len()); + let mut next_log_index = 0usize; + let mut prev_cumulative_gas = 0u64; + + for (idx, (tx, receipt)) in txs.iter().zip(receipts.iter()).enumerate() { + let gas_used = receipt.as_receipt().cumulative_gas_used - prev_cumulative_gas; + prev_cumulative_gas = receipt.as_receipt().cumulative_gas_used; + + let meta = reth_primitives_traits::TransactionMeta { + tx_hash: *tx.tx_hash(), + index: idx as u64, + block_hash, + block_number, + base_fee: bar.block.base_fee_per_gas(), + excess_blob_gas: bar.block.excess_blob_gas(), + timestamp: bar.block.timestamp(), + }; + + inputs.push(ConvertReceiptInput { + receipt: receipt.clone(), + tx: reth_primitives_traits::Recovered::new_unchecked( + tx, + tx.recover_signer().unwrap_or_default(), + ), + gas_used, + next_log_index, + meta, + }); + + next_log_index += receipt.as_receipt().logs.len(); + } + + converter.convert_receipts(inputs).map_err(Into::into) +} + +/// Converts a single [`CachedTxInfo`] into an RPC receipt. +fn cached_tx_info_to_rpc_receipt>( + info: &xlayer_flashblocks::cache::CachedTxInfo, + converter: &Eth::RpcConvert, +) -> Result, Eth::Error> +where + Eth::Error: From<::Error>, +{ + use alloy_consensus::transaction::TxHashRef; + use reth_rpc_convert::transaction::ConvertReceiptInput; + + let gas_used = info.receipt.as_receipt().cumulative_gas_used; + let meta = reth_primitives_traits::TransactionMeta { + tx_hash: *info.tx.tx_hash(), + index: info.tx_index, + block_hash: info.block_hash, + block_number: info.block_number, + base_fee: None, + excess_blob_gas: None, + timestamp: 0, + }; + + let input = ConvertReceiptInput { + receipt: info.receipt.clone(), + tx: reth_primitives_traits::Recovered::new_unchecked( + &info.tx, + info.tx.recover_signer().unwrap_or_default(), + ), + gas_used, + next_log_index: 0, + meta, + }; + + let mut receipts = converter.convert_receipts(vec![input]).map_err(Into::into)?; + Ok(receipts.remove(0)) +} + +/// Gets a transaction by index from a [`BlockAndReceipts`]. +fn get_tx_by_index_from_bar>( + bar: &reth_rpc_eth_types::block::BlockAndReceipts, + tx_index: u64, + converter: &Eth::RpcConvert, +) -> Result>, Eth::Error> +where + Eth::Error: From<::Error>, +{ + use alloy_consensus::transaction::TxHashRef; + + let txs = bar.block.body().transactions(); + let idx = tx_index as usize; + if idx >= txs.len() { + return Ok(None); + } + + let tx = &txs[idx]; + let block_hash = bar.block.hash(); + let block_number = bar.block.number(); + + let tx_info = TransactionInfo { + hash: Some(*tx.tx_hash()), + index: Some(tx_index), + block_hash: Some(block_hash), + block_number: Some(block_number), + base_fee: bar.block.base_fee_per_gas(), + }; + + let recovered = reth_primitives_traits::Recovered::new_unchecked( + tx.clone(), + tx.recover_signer().unwrap_or_default(), + ); + let rpc_tx = converter.fill(recovered, tx_info).map_err(Into::into)?; + Ok(Some(rpc_tx)) +} + +/// Extracts logs from a [`BlockAndReceipts`] that match the given filter. +fn extract_logs_from_bar( + bar: &reth_rpc_eth_types::block::BlockAndReceipts, + filter: &Filter, +) -> Vec { + use alloy_consensus::transaction::TxHashRef; + + let block_hash = bar.block.hash(); + let block_number = bar.block.number(); + + let mut logs = Vec::new(); + let mut log_index = 0u64; + + let txs = bar.block.body().transactions(); + let receipts = bar.receipts.as_ref(); + + for (tx_idx, (tx, receipt)) in txs.iter().zip(receipts.iter()).enumerate() { + for receipt_log in &receipt.as_receipt().logs { + // Check address filter + if !filter.address.matches_any(&receipt_log.address) { + log_index += 1; + continue; + } + + // Check topics filter + if !filter_matches_topics(&filter.topics, &receipt_log.topics()) { + log_index += 1; + continue; + } + + logs.push(Log { + inner: receipt_log.clone(), + block_hash: Some(block_hash), + block_number: Some(block_number), + block_timestamp: Some(bar.block.timestamp()), + transaction_hash: Some(*tx.tx_hash()), + transaction_index: Some(tx_idx as u64), + log_index: Some(log_index), + removed: false, + }); + log_index += 1; + } + } + + logs +} + +/// Checks if log topics match the filter topics. +fn filter_matches_topics( + filter_topics: &[alloy_rpc_types_eth::FilterSet], + log_topics: &[alloy_primitives::B256], +) -> bool { + for (i, filter_set) in filter_topics.iter().enumerate() { + if filter_set.is_empty() { + continue; + } + match log_topics.get(i) { + Some(topic) => { + if !filter_set.matches(topic) { + return false; + } + } + None => return false, + } + } + true +} diff --git a/crates/rpc/src/lib.rs b/crates/rpc/src/lib.rs index bb620fb6..8696507e 100644 --- a/crates/rpc/src/lib.rs +++ b/crates/rpc/src/lib.rs @@ -1,15 +1,15 @@ #![cfg_attr(not(test), warn(unused_crate_dependencies))] #![cfg_attr(docsrs, feature(doc_cfg))] +pub mod eth; pub mod xlayer_ext; -use std::time::Instant; -// Re-export for convenience +pub use eth::{EthApiOverrideServer, XLayerEthApiExt}; pub use xlayer_ext::{ - PendingFlashBlockProvider, SequencerClientProvider, XlayerRpcExt, XlayerRpcExtApiServer, + SequencerClientProvider, XlayerRpcExt, XlayerRpcExtApiServer, }; -// Implement SequencerClientProvider for OpEthApi +// Implement `SequencerClientProvider` for `OpEthApi` use reth_optimism_rpc::{OpEthApi, SequencerClient}; use reth_rpc_eth_api::{RpcConvert, RpcNodeCore}; @@ -22,17 +22,3 @@ where self.sequencer_client() } } - -impl PendingFlashBlockProvider for OpEthApi -where - N: RpcNodeCore, - Rpc: RpcConvert, -{ - fn has_pending_flashblock(&self) -> bool { - self.pending_block_rx().is_some_and(|rx| { - rx.borrow() - .as_ref() - .is_some_and(|pending_flashblock| Instant::now() < pending_flashblock.expires_at) - }) - } -} diff --git a/crates/rpc/src/xlayer_ext.rs b/crates/rpc/src/xlayer_ext.rs index 3eaec3de..8ca4088a 100644 --- a/crates/rpc/src/xlayer_ext.rs +++ b/crates/rpc/src/xlayer_ext.rs @@ -1,12 +1,10 @@ -use std::sync::Arc; - use jsonrpsee::{ core::{async_trait, RpcResult}, proc_macros::rpc, }; - +use reth_optimism_primitives::OpPrimitives; use reth_optimism_rpc::SequencerClient; -use reth_rpc::RpcTypes; +use xlayer_flashblocks::cache::FlashblockStateCache; /// Trait for accessing sequencer client from backend pub trait SequencerClientProvider { @@ -14,92 +12,57 @@ pub trait SequencerClientProvider { fn sequencer_client(&self) -> Option<&SequencerClient>; } -/// Trait for checking if pending block (flashblocks) is enabled -pub trait PendingFlashBlockProvider { - /// Returns true if pending block receiver is available and has actual pending block data (flashblocks enabled) - fn has_pending_flashblock(&self) -> bool; -} - -/// XLayer-specific RPC API trait -#[rpc(server, namespace = "eth", server_bounds( - Net: 'static + RpcTypes, - ::TransactionRequest: - serde::de::DeserializeOwned + serde::Serialize -))] -pub trait XlayerRpcExtApi { - /// Returns boolean indicating if the node's flashblocks functionality is enabled and working. +/// `XLayer`-specific RPC API trait. +#[rpc(server, namespace = "eth")] +pub trait XlayerRpcExtApi { + /// Returns boolean indicating if the node's flashblocks functionality is + /// enabled and working. + /// + /// Returns `true` when the flashblocks state cache has been initialized + /// (i.e. confirm height > 0), meaning the node is actively receiving and + /// caching flashblock data. #[method(name = "flashblocksEnabled")] async fn flashblocks_enabled(&self) -> RpcResult; } -/// XLayer RPC extension implementation -#[derive(Debug)] -pub struct XlayerRpcExt { - pub backend: Arc, +/// `XLayer` RPC extension implementation. +/// +/// Checks the [`FlashblockStateCache`] confirm height to determine if +/// flashblocks are active. A non-zero confirm height means the cache has been +/// initialized and is actively tracking flashblock state. +#[derive(Debug, Clone)] +pub struct XlayerRpcExt { + flash_cache: Option>, +} + +impl XlayerRpcExt { + /// Creates a new [`XlayerRpcExt`]. + pub fn new(flash_cache: Option>) -> Self { + Self { flash_cache } + } } #[async_trait] -impl XlayerRpcExtApiServer for XlayerRpcExt -where - T: PendingFlashBlockProvider + Send + Sync + 'static, - Net: RpcTypes + Send + Sync + 'static, -{ +impl XlayerRpcExtApiServer for XlayerRpcExt { async fn flashblocks_enabled(&self) -> RpcResult { - Ok(self.backend.has_pending_flashblock()) + Ok(self.flash_cache.as_ref().is_some_and(|cache| cache.get_confirm_height() > 0)) } } #[cfg(test)] mod tests { - use super::PendingFlashBlockProvider; - use std::time::{Duration, Instant}; - use tokio::sync::watch; - - struct MockPendingFlashBlock { - expires_at: Instant, - } - - struct MockPendingFlashBlockProvider { - rx: Option>>, - } - - impl PendingFlashBlockProvider for MockPendingFlashBlockProvider { - fn has_pending_flashblock(&self) -> bool { - self.rx.as_ref().is_some_and(|rx| { - rx.borrow().as_ref().is_some_and(|pending_flashblock| { - Instant::now() < pending_flashblock.expires_at - }) - }) - } - } - - #[test] - fn test_no_receiver_returns_false() { - let provider = MockPendingFlashBlockProvider { rx: None }; - assert!(!provider.has_pending_flashblock()); - } - - #[test] - fn test_empty_receiver_returns_false() { - let (_tx, rx) = watch::channel(None); - let provider = MockPendingFlashBlockProvider { rx: Some(rx) }; - assert!(!provider.has_pending_flashblock()); - } + use super::*; #[test] - fn test_expired_flashblock_returns_false() { - let expired = - MockPendingFlashBlock { expires_at: Instant::now() - Duration::from_secs(60) }; - let (_tx, rx) = watch::channel(Some(expired)); - let provider = MockPendingFlashBlockProvider { rx: Some(rx) }; - assert!(!provider.has_pending_flashblock()); + fn test_flashblocks_disabled_when_no_cache() { + let ext = XlayerRpcExt::new(None); + assert!(ext.flash_cache.is_none()); } #[test] - fn test_valid_flashblock_returns_true() { - let valid = MockPendingFlashBlock { expires_at: Instant::now() + Duration::from_secs(60) }; - let (_tx, rx) = watch::channel(Some(valid)); - let provider = MockPendingFlashBlockProvider { rx: Some(rx) }; - assert!(provider.has_pending_flashblock()); + fn test_flashblocks_disabled_at_zero_height() { + let cache = FlashblockStateCache::::new(); + let ext = XlayerRpcExt::new(Some(cache)); + assert!(ext.flash_cache.as_ref().unwrap().get_confirm_height() == 0); } } From 1dd2c755a78d1a3730a2408ee54af0cf00deb49a Mon Sep 17 00:00:00 2001 From: Niven Date: Wed, 11 Mar 2026 18:03:17 +0800 Subject: [PATCH 14/76] style(rpc): clean up import ordering and formatting MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude Opus 4.6 --- bin/node/src/main.rs | 8 +++----- crates/rpc/src/lib.rs | 4 +--- 2 files changed, 4 insertions(+), 8 deletions(-) diff --git a/bin/node/src/main.rs b/bin/node/src/main.rs index 8ac929e1..372cb0f8 100644 --- a/bin/node/src/main.rs +++ b/bin/node/src/main.rs @@ -11,25 +11,23 @@ use either::Either; use std::sync::Arc; use tracing::info; +use reth::providers::BlockNumReader; use reth::rpc::eth::EthApiTypes; use reth::{ builder::{DebugNodeLauncher, EngineNodeLauncher, Node, NodeHandle, TreeConfig}, providers::providers::BlockchainProvider, }; -use reth::providers::BlockNumReader; use reth_node_api::FullNodeComponents; use reth_optimism_cli::Cli; use reth_optimism_node::{args::RollupArgs, OpNode}; use reth_rpc_server_types::RethRpcModule; use xlayer_chainspec::XLayerChainSpecParser; +use xlayer_flashblocks::cache::FlashblockStateCache; use xlayer_flashblocks::{handle::FlashblocksService, subscription::FlashblocksPubSub}; use xlayer_legacy_rpc::{layer::LegacyRpcRouterLayer, LegacyRpcRouterConfig}; use xlayer_monitor::{start_monitor_handle, RpcMonitorLayer, XLayerMonitor}; -use xlayer_flashblocks::cache::FlashblockStateCache; -use xlayer_rpc::{ - EthApiOverrideServer, XLayerEthApiExt, XlayerRpcExt, XlayerRpcExtApiServer, -}; +use xlayer_rpc::{EthApiOverrideServer, XLayerEthApiExt, XlayerRpcExt, XlayerRpcExtApiServer}; #[global_allocator] static ALLOC: reth_cli_util::allocator::Allocator = reth_cli_util::allocator::new_allocator(); diff --git a/crates/rpc/src/lib.rs b/crates/rpc/src/lib.rs index 8696507e..a11d836d 100644 --- a/crates/rpc/src/lib.rs +++ b/crates/rpc/src/lib.rs @@ -5,9 +5,7 @@ pub mod eth; pub mod xlayer_ext; pub use eth::{EthApiOverrideServer, XLayerEthApiExt}; -pub use xlayer_ext::{ - SequencerClientProvider, XlayerRpcExt, XlayerRpcExtApiServer, -}; +pub use xlayer_ext::{SequencerClientProvider, XlayerRpcExt, XlayerRpcExtApiServer}; // Implement `SequencerClientProvider` for `OpEthApi` use reth_optimism_rpc::{OpEthApi, SequencerClient}; From 10fd30c9843a72f0efd7d3c167982c19ddd5e0a7 Mon Sep 17 00:00:00 2001 From: Niven Date: Wed, 11 Mar 2026 20:09:51 +0800 Subject: [PATCH 15/76] feat(flashblocks-rpc): clean up handle logic and channels MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude Opus 4.6 --- bin/node/src/args.rs | 45 ++++-- bin/node/src/main.rs | 37 ++--- bin/node/src/payload.rs | 5 +- crates/flashblocks/src/cache/pending.rs | 30 +--- crates/flashblocks/src/handle.rs | 173 ++++++--------------- crates/flashblocks/src/lib.rs | 19 ++- crates/flashblocks/src/service.rs | 159 +++++++++++++++++++ crates/flashblocks/src/subscription/rpc.rs | 18 ++- crates/flashblocks/src/ws/mod.rs | 7 +- crates/rpc/src/eth.rs | 3 +- 10 files changed, 293 insertions(+), 203 deletions(-) create mode 100644 crates/flashblocks/src/service.rs diff --git a/bin/node/src/args.rs b/bin/node/src/args.rs index 69fa4dd7..a6c2c64a 100644 --- a/bin/node/src/args.rs +++ b/bin/node/src/args.rs @@ -13,6 +13,10 @@ pub struct XLayerArgs { #[command(flatten)] pub builder: BuilderArgs, + /// Flashblocks RPC configuration + #[command(flatten)] + pub flashblocks_rpc: FlashblocksRpcArgs, + /// Enable legacy rpc routing #[command(flatten)] pub legacy: LegacyRpcArgs, @@ -21,22 +25,6 @@ pub struct XLayerArgs { #[command(flatten)] pub monitor: FullLinkMonitorArgs, - /// Enable custom flashblocks subscription - #[arg( - long = "xlayer.flashblocks-subscription", - help = "Enable custom flashblocks subscription (disabled by default)", - default_value = "false" - )] - pub enable_flashblocks_subscription: bool, - - /// Set the number of subscribed addresses in flashblocks subscription - #[arg( - long = "xlayer.flashblocks-subscription-max-addresses", - help = "Set the number of subscribed addresses in flashblocks subscription", - default_value = "1000" - )] - pub flashblocks_subscription_max_addresses: usize, - #[arg( long = "xlayer.sequencer-mode", help = "Enable sequencer mode for the node (default: false, i.e., RPC mode). This flag can be used by various business logic components to determine node behavior.", @@ -135,6 +123,31 @@ impl LegacyRpcArgs { } } +pub struct FlashblocksRpcArgs { + /// Enable flashblocks RPC + #[arg( + long = "xlayer.flashblocks-url", + help = "URL of the flashblocks RPC endpoint (disabled by default)" + )] + pub flashblock_url: Option, + + /// Enable custom flashblocks subscription + #[arg( + long = "xlayer.flashblocks-subscription", + help = "Enable custom flashblocks subscription (disabled by default)", + default_value = "false" + )] + pub enable_flashblocks_subscription: bool, + + /// Set the number of subscribed addresses in flashblocks subscription + #[arg( + long = "xlayer.flashblocks-subscription-max-addresses", + help = "Set the number of subscribed addresses in flashblocks subscription", + default_value = "1000" + )] + pub flashblocks_subscription_max_addresses: usize, +} + #[cfg(test)] mod tests { use super::*; diff --git a/bin/node/src/main.rs b/bin/node/src/main.rs index 372cb0f8..65cfb65e 100644 --- a/bin/node/src/main.rs +++ b/bin/node/src/main.rs @@ -23,8 +23,9 @@ use reth_optimism_node::{args::RollupArgs, OpNode}; use reth_rpc_server_types::RethRpcModule; use xlayer_chainspec::XLayerChainSpecParser; -use xlayer_flashblocks::cache::FlashblockStateCache; -use xlayer_flashblocks::{handle::FlashblocksService, subscription::FlashblocksPubSub}; +use xlayer_flashblocks::{ + cache::FlashblockStateCache, FlashblocksPubSub, FlashblocksRpcService, WsFlashBlockStream, +}; use xlayer_legacy_rpc::{layer::LegacyRpcRouterLayer, LegacyRpcRouterConfig}; use xlayer_monitor::{start_monitor_handle, RpcMonitorLayer, XLayerMonitor}; use xlayer_rpc::{EthApiOverrideServer, XLayerEthApiExt, XlayerRpcExt, XlayerRpcExtApiServer}; @@ -108,6 +109,7 @@ fn main() { // It handles both flashblocks and default modes internally let payload_builder = XLayerPayloadServiceBuilder::new( args.xlayer_args.builder.clone(), + args.xlayer_args.flashblocks_rpc.flashblock_url.is_some(), args.rollup_args.compute_pending_block, )?; @@ -123,28 +125,23 @@ fn main() { let new_op_eth_api = Arc::new(ctx.registry.eth_api().clone()); // Initialize flashblocks RPC service if not in flashblocks sequencer mode - if !args.xlayer_args.builder.flashblocks.enabled { - if let Some(flashblock_rx) = new_op_eth_api.subscribe_received_flashblocks() - { - let service = FlashblocksService::new( - ctx.node().clone(), - flashblock_rx, - args.xlayer_args.builder.flashblocks, - args.rollup_args.flashblocks_url.is_some(), - datadir, - )?; - service.spawn(); - info!(target: "reth::cli", "xlayer flashblocks service initialized"); - } + if let Some(flashblock_url) = args.xlayer_args.flashblocks_rpc.flashblock_url { + let stream = WsFlashBlockStream::new(flashblock_url); + let service = FlashblocksRpcService::new( + ctx.node().task_executor().clone(), + stream, + args.xlayer_args.builder.flashblocks, + args.rollup_args.flashblocks_url.is_some(), + datadir, + )?; + service.spawn(); + info!(target: "reth::cli", "xlayer flashblocks service initialized"); if xlayer_args.enable_flashblocks_subscription - && let Some(pending_blocks_rx) = new_op_eth_api.pending_block_rx() { - let eth_pubsub = ctx.registry.eth_handlers().pubsub.clone(); - let flashblocks_pubsub = FlashblocksPubSub::new( - eth_pubsub, - pending_blocks_rx, + ctx.registry.eth_handlers().pubsub.clone(), + service.subscribe_pending_sequence(), Box::new(ctx.node().task_executor().clone()), new_op_eth_api.converter().clone(), xlayer_args.flashblocks_subscription_max_addresses, diff --git a/bin/node/src/payload.rs b/bin/node/src/payload.rs index 49e1b9b5..e308c968 100644 --- a/bin/node/src/payload.rs +++ b/bin/node/src/payload.rs @@ -27,10 +27,12 @@ pub struct XLayerPayloadServiceBuilder { impl XLayerPayloadServiceBuilder { pub fn new( xlayer_builder_args: BuilderArgs, + flashblock_rpc: bool, compute_pending_block: bool, ) -> eyre::Result { Self::with_config( xlayer_builder_args, + flashblock_rpc, compute_pending_block, OpDAConfig::default(), OpGasLimitConfig::default(), @@ -39,11 +41,12 @@ impl XLayerPayloadServiceBuilder { pub fn with_config( xlayer_builder_args: BuilderArgs, + flashblock_rpc: bool, compute_pending_block: bool, da_config: OpDAConfig, gas_limit_config: OpGasLimitConfig, ) -> eyre::Result { - let builder = if xlayer_builder_args.flashblocks.enabled { + let builder = if (xlayer_builder_args.flashblocks.enabled || flashblock_rpc) { let builder_config = BuilderConfig::try_from(xlayer_builder_args)?; XLayerPayloadServiceBuilderInner::Flashblocks(Box::new(FlashblocksServiceBuilder( builder_config, diff --git a/crates/flashblocks/src/cache/pending.rs b/crates/flashblocks/src/cache/pending.rs index 7c87fb65..8ebb8869 100644 --- a/crates/flashblocks/src/cache/pending.rs +++ b/crates/flashblocks/src/cache/pending.rs @@ -19,15 +19,12 @@ pub struct PendingSequence { tx_index: HashMap>, /// Cached reads from execution for reuse. pub cached_reads: CachedReads, + /// The current block hash of the latest flashblocks sequence. + pub block_hash: B256, /// Parent hash of the built block (may be non-canonical or canonical). pub parent_hash: B256, /// The last flashblock index of the latest flashblocks sequence. pub last_flashblock_index: u64, - /// Whether the [`PendingFlashblockSequence`] has a properly computed stateroot. - pub has_computed_state_root: bool, - /// The current block hash of the latest flashblocks sequence. `None` if state - /// root is not computed yet. - pub block_hash: Option, } impl PendingSequence { @@ -36,30 +33,11 @@ impl PendingSequence { pending: PendingBlock, tx_index: HashMap>, cached_reads: CachedReads, + block_hash: B256, parent_hash: B256, last_flashblock_index: u64, ) -> Self { - Self { - pending, - tx_index, - cached_reads, - parent_hash, - last_flashblock_index, - has_computed_state_root: false, - block_hash: None, - } - } - - /// Returns the properly calculated state root for that block if it was computed. - pub fn computed_state_root(&self) -> Option { - self.has_computed_state_root.then_some(self.pending.block().state_root()) - } - - /// Sets the computed state root and block hash for the pending block. - pub fn set_state_root_and_block_hash(&mut self, pending: PendingBlock) { - self.pending = pending; - self.block_hash = Some(self.pending.block().hash()); - self.has_computed_state_root = true; + Self { pending, tx_index, cached_reads, block_hash, parent_hash, last_flashblock_index } } pub fn get_height(&self) -> u64 { diff --git a/crates/flashblocks/src/handle.rs b/crates/flashblocks/src/handle.rs index 9c485dd1..0ba9fb7a 100644 --- a/crates/flashblocks/src/handle.rs +++ b/crates/flashblocks/src/handle.rs @@ -1,130 +1,13 @@ -use std::{net::SocketAddr, sync::Arc, time::Duration}; -use tracing::{debug, info, trace, warn}; +use crate::ReceivedFlashblocksRx; +use std::{sync::Arc, time::Duration}; +use tracing::*; -use reth_node_api::FullNodeComponents; use reth_node_core::dirs::{ChainPath, DataDirPath}; -use reth_optimism_flashblocks::{FlashBlock, FlashBlockRx}; -use xlayer_builder::{ - args::FlashblocksArgs, - flashblocks::{FlashblockPayloadsCache, WebSocketPublisher}, - metrics::{tokio::FlashblocksTaskMetrics, BuilderMetrics}, -}; - -pub struct FlashblocksService -where - Node: FullNodeComponents, -{ - node: Node, - flashblock_rx: FlashBlockRx, - ws_pub: Arc, - relay_flashblocks: bool, - datadir: ChainPath, -} - -impl FlashblocksService -where - Node: FullNodeComponents, -{ - pub fn new( - node: Node, - flashblock_rx: FlashBlockRx, - args: FlashblocksArgs, - relay_flashblocks: bool, - datadir: ChainPath, - ) -> Result { - let ws_addr = SocketAddr::new(args.flashblocks_addr.parse()?, args.flashblocks_port); - - let metrics = Arc::new(BuilderMetrics::default()); - let task_metrics = Arc::new(FlashblocksTaskMetrics::new()); - let ws_pub = Arc::new( - WebSocketPublisher::new( - ws_addr, - metrics, - &task_metrics.websocket_publisher, - args.ws_subscriber_limit, - ) - .map_err(|e| eyre::eyre!("Failed to create WebSocket publisher: {e}"))?, - ); - - info!(target: "flashblocks", "WebSocket publisher initialized at {}", ws_addr); - - Ok(Self { node, flashblock_rx, ws_pub, relay_flashblocks, datadir }) - } - - pub fn spawn(mut self) { - debug!(target: "flashblocks", "Initializing flashblocks service"); - - let task_executor = self.node.task_executor().clone(); - if self.relay_flashblocks { - let datadir = self.datadir.clone(); - let flashblock_rx = self.flashblock_rx.resubscribe(); - task_executor.spawn_critical( - "xlayer-flashblocks-persistence", - Box::pin(async move { - handle_persistence(flashblock_rx, datadir).await; - }), - ); - - task_executor.spawn_critical( - "xlayer-flashblocks-publish", - Box::pin(async move { - self.publish().await; - }), - ); - } - } - - async fn publish(&mut self) { - info!( - target: "flashblocks", - "Flashblocks websocket publisher started" - ); - - loop { - match self.flashblock_rx.recv().await { - Ok(flashblock) => { - trace!( - target: "flashblocks", - "Received flashblock: index={}, block_hash={}", - flashblock.index, - flashblock.diff.block_hash - ); - self.publish_flashblock(&flashblock).await; - } - Err(e) => { - warn!(target: "flashblocks", "Flashblock receiver error: {:?}", e); - break; - } - } - } - - info!(target: "flashblocks", "Flashblocks service stopped"); - } - - /// Relays the incoming flashblock to the flashblock websocket subscribers. - async fn publish_flashblock(&self, flashblock: &Arc) { - match self.ws_pub.publish(flashblock) { - Ok(_) => { - trace!( - target: "flashblocks", - "Published flashblock: index={}, block_hash={}", - flashblock.index, - flashblock.diff.block_hash - ); - } - Err(e) => { - warn!( - target: "flashblocks", - "Failed to publish flashblock: {:?}", e - ); - } - } - } -} +use xlayer_builder::flashblocks::{FlashblockPayloadsCache, WebSocketPublisher}; /// Handles the persistence of the pending flashblocks sequence to disk. -async fn handle_persistence(mut rx: FlashBlockRx, datadir: ChainPath) { +pub async fn handle_persistence(mut rx: ReceivedFlashblocksRx, datadir: ChainPath) { let cache = FlashblockPayloadsCache::new(Some(datadir)); // Set default flush interval to 5 seconds @@ -166,3 +49,49 @@ async fn handle_persistence(mut rx: FlashBlockRx, datadir: ChainPath, +) { + info!( + target: "flashblocks", + "Flashblocks websocket publisher started" + ); + + loop { + match rx.recv().await { + Ok(flashblock) => { + trace!( + target: "flashblocks", + "Received flashblock: index={}, block_hash={}", + flashblock.index, + flashblock.diff.block_hash + ); + match ws_pub.publish(&flashblock) { + Ok(_) => { + trace!( + target: "flashblocks", + "Published flashblock: index={}, block_hash={}", + flashblock.index, + flashblock.diff.block_hash + ); + } + Err(e) => { + warn!( + target: "flashblocks", + "Failed to publish flashblock: {:?}", e + ); + } + } + } + Err(e) => { + warn!(target: "flashblocks", "Flashblock receiver error: {:?}", e); + break; + } + } + } + info!(target: "flashblocks", "Flashblocks service stopped"); +} diff --git a/crates/flashblocks/src/lib.rs b/crates/flashblocks/src/lib.rs index 2f128506..25d8ee26 100644 --- a/crates/flashblocks/src/lib.rs +++ b/crates/flashblocks/src/lib.rs @@ -1,13 +1,22 @@ //! X-Layer flashblocks crate. -pub mod cache; +mod cache; mod execution; -pub mod handle; -pub mod subscription; +pub(crate) mod handle; +mod service; +mod subscription; mod ws; #[cfg(test)] mod test_utils; -pub use execution::FlashblockCachedReceipt; -pub use ws::{FlashBlockDecoder, WsConnect, WsFlashBlockStream}; +pub use cache::PendingSequence; +pub use service::FlashblocksRpcService; +pub use subscription::FlashblocksPubSub; +pub use ws::WsFlashBlockStream; + +use op_alloy_rpc_types_engine::OpFlashblockPayload; +use std::sync::Arc; + +pub type PendingSequenceRx = tokio::sync::watch::Receiver>>; +pub type ReceivedFlashblocksRx = tokio::sync::broadcast::Receiver>; diff --git a/crates/flashblocks/src/service.rs b/crates/flashblocks/src/service.rs new file mode 100644 index 00000000..35aa75d5 --- /dev/null +++ b/crates/flashblocks/src/service.rs @@ -0,0 +1,159 @@ +use crate::{ + handle::{handle_persistence, handle_relay_flashblocks}, + ReceivedFlashblocksRx, +}; +use futures_util::{FutureExt, Stream, StreamExt}; +use std::{net::SocketAddr, sync::Arc, time::Duration}; +use tokio::time::sleep; +use tracing::*; + +use op_alloy_rpc_types_engine::OpFlashblockPayload; +use reth_node_core::dirs::{ChainPath, DataDirPath}; +use reth_tasks::TaskExecutor; + +use xlayer_builder::{ + args::FlashblocksArgs, + flashblocks::WebSocketPublisher, + metrics::{tokio::FlashblocksTaskMetrics, BuilderMetrics}, +}; + +const CONNECTION_BACKOUT_PERIOD: Duration = Duration::from_secs(5); + +pub struct FlashblocksRpcService { + /// Incoming flashblock stream. + incoming_flashblock_rx: S, + /// Broadcast channel to forward received flashblocks from the subscription. + received_flashblocks_tx: tokio::sync::broadcast::Sender>, + /// Task executor. + task_executor: TaskExecutor, + /// Flashblocks websocket publisher for relaying flashblocks to subscribers. + ws_pub: Arc, + /// Whether to relay flashblocks to the subscribers. + relay_flashblocks: bool, + /// Data directory for flashblocks persistence. + datadir: ChainPath, +} + +impl FlashblocksRpcService +where + S: Stream> + Unpin + 'static, +{ + pub fn new( + task_executor: TaskExecutor, + incoming_flashblock_rx: S, + args: FlashblocksArgs, + relay_flashblocks: bool, + datadir: ChainPath, + ) -> Result { + let (received_flashblocks_tx, _) = tokio::sync::broadcast::channel(128); + + // Initialize ws publisher for relaying flashblocks + let ws_addr = SocketAddr::new(args.flashblocks_addr.parse()?, args.flashblocks_port); + let metrics = Arc::new(BuilderMetrics::default()); + let task_metrics = Arc::new(FlashblocksTaskMetrics::new()); + let ws_pub = Arc::new( + WebSocketPublisher::new( + ws_addr, + metrics, + &task_metrics.websocket_publisher, + args.ws_subscriber_limit, + ) + .map_err(|e| eyre::eyre!("Failed to create WebSocket publisher: {e}"))?, + ); + info!(target: "flashblocks", "WebSocket publisher initialized at {}", ws_addr); + + Ok(Self { + incoming_flashblock_rx, + received_flashblocks_tx, + task_executor, + ws_pub, + relay_flashblocks, + datadir, + }) + } + + /// Returns a new subscription to received flashblocks. + pub fn subscribe_received_flashblocks(&self) -> ReceivedFlashblocksRx { + self.received_flashblocks_tx.subscribe() + } + + pub fn spawn(&self) { + debug!(target: "flashblocks", "Initializing flashblocks service"); + // Spawn persistence handle + let datadir = self.datadir.clone(); + let rx = self.subscribe_received_flashblocks(); + self.task_executor.spawn_critical( + "xlayer-flashblocks-persistence", + Box::pin(async move { + handle_persistence(rx, datadir).await; + }), + ); + + // Spawn relayer handle + if self.relay_flashblocks { + let rx = self.subscribe_received_flashblocks(); + let ws_pub = self.ws_pub.clone(); + self.task_executor.spawn_critical( + "xlayer-flashblocks-publish", + Box::pin(async move { + handle_relay_flashblocks(rx, ws_pub).await; + }), + ); + } + } + + /// Contains the main logic for processing raw incoming flashblocks, and updating the + /// flashblocks state cache layer. The logic pipeline is as follows: + /// 1. Notifies subscribers + /// 2. Inserts into the raw flashblocks cache + pub async fn handle_flashblocks(&mut self) { + loop { + tokio::select! { + // Event 1: New flashblock arrives (batch process all ready flashblocks) + result = self.incoming_flashblock_rx.next() => { + match result { + Some(Ok(flashblock)) => { + // Process first flashblock + self.process_flashblock(flashblock); + + // Batch process all other immediately available flashblocks + while let Some(result) = self.incoming_flashblock_rx.next().now_or_never().flatten() { + match result { + Ok(fb) => self.process_flashblock(fb), + Err(err) => warn!(target: "flashblocks", %err, "Error receiving flashblock"), + } + } + } + Some(Err(err)) => { + warn!( + target: "flashblocks", + %err, + retry_period = CONNECTION_BACKOUT_PERIOD.as_secs(), + "Error receiving flashblock" + ); + sleep(CONNECTION_BACKOUT_PERIOD).await; + } + None => { + warn!(target: "flashblocks", "Flashblock stream ended"); + break; + } + } + } + } + } + } + + /// Processes a single flashblock: notifies subscribers, and inserts into + /// the raw flashblocks cache. + fn process_flashblock(&mut self, flashblock: OpFlashblockPayload) { + self.notify_received_flashblock(&flashblock); + // TODO: Insert into the raw flashblocks cache + } + + /// Notifies all subscribers about the received flashblock. + fn notify_received_flashblock(&self, flashblock: &OpFlashblockPayload) { + if self.received_flashblocks_tx.receiver_count() > 0 { + let _ = self.received_flashblocks_tx.send(Arc::new(flashblock.clone())); + } + } +} diff --git a/crates/flashblocks/src/subscription/rpc.rs b/crates/flashblocks/src/subscription/rpc.rs index ec11bb98..54254611 100644 --- a/crates/flashblocks/src/subscription/rpc.rs +++ b/crates/flashblocks/src/subscription/rpc.rs @@ -1,6 +1,9 @@ -use super::pubsub::{ - EnrichedTransaction, FlashblockParams, FlashblockStreamEvent, FlashblockSubscriptionKind, - FlashblocksFilter, +use crate::{ + subscription::pubsub::{ + EnrichedTransaction, FlashblockParams, FlashblockStreamEvent, FlashblockSubscriptionKind, + FlashblocksFilter, + }, + PendingSequence, PendingSequenceRx, }; use futures::StreamExt; @@ -18,7 +21,6 @@ use alloy_json_rpc::RpcObject; use alloy_primitives::{Address, TxHash, U256}; use alloy_rpc_types_eth::{Header, TransactionInfo}; -use reth_optimism_flashblocks::{PendingBlockRx, PendingFlashBlock}; use reth_primitives_traits::{ NodePrimitives, Recovered, RecoveredBlock, SealedBlock, TransactionMeta, }; @@ -90,7 +92,7 @@ where /// Subscription tasks are spawned via [`tokio::task::spawn`] pub fn new( eth_pubsub: EthPubSub, - pending_block_rx: PendingBlockRx, + pending_block_rx: PendingSequenceRx, subscription_task_spawner: Box, tx_converter: Eth::RpcConvert, max_subscribed_addresses: usize, @@ -196,7 +198,7 @@ where #[derive(Clone)] pub struct FlashblocksPubSubInner { /// Pending block receiver from flashblocks, if available - pub(crate) pending_block_rx: PendingBlockRx, + pub(crate) pending_block_rx: PendingSequenceRx, /// The type that's used to spawn subscription tasks. pub(crate) subscription_task_spawner: Box, /// RPC transaction converter. @@ -236,7 +238,7 @@ where /// Convert a flashblock into a stream of events (header + transaction messages) fn flashblock_to_stream_events( - pending_block: &PendingFlashBlock, + pending_block: &PendingSequence, filter: &FlashblocksFilter, tx_converter: &Eth::RpcConvert, txhash_cache: &Cache, @@ -485,7 +487,7 @@ where /// Extract `Header` from `PendingFlashBlock` fn extract_header_from_pending_block( - pending_block: &PendingFlashBlock, + pending_block: &PendingSequence, ) -> Result, ErrorObject<'static>> { let block = pending_block.block(); Ok(Header::from_consensus( diff --git a/crates/flashblocks/src/ws/mod.rs b/crates/flashblocks/src/ws/mod.rs index 651d83c9..3a69d13f 100644 --- a/crates/flashblocks/src/ws/mod.rs +++ b/crates/flashblocks/src/ws/mod.rs @@ -1,6 +1,5 @@ -pub use stream::{WsConnect, WsFlashBlockStream}; - mod decoding; -pub use decoding::FlashBlockDecoder; - mod stream; + +pub use decoding::FlashBlockDecoder; +pub use stream::WsFlashBlockStream; diff --git a/crates/rpc/src/eth.rs b/crates/rpc/src/eth.rs index 80fed97d..704c9e45 100644 --- a/crates/rpc/src/eth.rs +++ b/crates/rpc/src/eth.rs @@ -35,7 +35,8 @@ use reth_rpc_eth_api::{ EthApiTypes, EthFilterApiServer, RpcBlock, RpcReceipt, }; use tracing::debug; -use xlayer_flashblocks::cache::FlashblockStateCache; + +use xlayer_flashblocks::FlashblockStateCache; // --------------------------------------------------------------------------- // EthApiOverride — flashblocks `eth_*` method overrides From c9a18d86e3181d1a580da0f9e7d4316bafd7ff7a Mon Sep 17 00:00:00 2001 From: Niven Date: Thu, 12 Mar 2026 21:06:18 +0800 Subject: [PATCH 16/76] feat(flashblock-rpc): fix flashblocks eth extensions and helpers, clean up cache interfaces MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude Opus 4.6 --- bin/node/src/main.rs | 19 +- crates/flashblocks/src/cache/confirm.rs | 9 +- crates/flashblocks/src/cache/mod.rs | 137 ++-- crates/flashblocks/src/cache/pending.rs | 13 +- crates/flashblocks/src/lib.rs | 2 +- crates/rpc/src/eth.rs | 906 ++++++++---------------- crates/rpc/src/helper.rs | 164 +++++ crates/rpc/src/lib.rs | 1 + crates/rpc/src/xlayer_ext.rs | 27 +- 9 files changed, 554 insertions(+), 724 deletions(-) create mode 100644 crates/rpc/src/helper.rs diff --git a/bin/node/src/main.rs b/bin/node/src/main.rs index 65cfb65e..a9aecb37 100644 --- a/bin/node/src/main.rs +++ b/bin/node/src/main.rs @@ -156,30 +156,23 @@ fn main() { // Create flashblocks state cache if flashblocks URL is configured. // Shared between the Eth API override and the ext RPC. - let flash_cache = if args.rollup_args.flashblocks_url.is_some() { - let canon_height = ctx.node().provider().best_block_number()?; - Some(FlashblockStateCache::new(canon_height)) - } else { - None - }; + let flashblocks_state = args.rollup_args.flashblocks_url.map(|_| FlashblockStateCache::new()); // Register flashblocks Eth API override (replaces subset of eth_ methods) - if let Some(ref cache) = flash_cache { - let eth_filter = ctx.registry.eth_handlers().filter.clone(); - let eth_override = XLayerEthApiExt::new( + if let Some(fb_cache) = flashblocks_state.as_ref() { + let flashblocks_eth = XLayerEthApiExt::new( ctx.registry.eth_api().clone(), - eth_filter, - cache.clone(), + fb_cache.clone(), ); ctx.modules.add_or_replace_if_module_configured( RethRpcModule::Eth, - EthApiOverrideServer::into_rpc(eth_override), + EthApiOverrideServer::into_rpc(flashblocks_eth), )?; info!(target: "reth::cli", "xlayer flashblocks eth api override enabled"); } // Register X Layer RPC (eth_flashblocksEnabled) — always active - let xlayer_rpc = XlayerRpcExt::new(flash_cache); + let xlayer_rpc = XlayerRpcExt::new(flashblocks_state); ctx.modules.merge_configured(XlayerRpcExtApiServer::into_rpc( xlayer_rpc, ))?; diff --git a/crates/flashblocks/src/cache/confirm.rs b/crates/flashblocks/src/cache/confirm.rs index 692667a7..cdf2f897 100644 --- a/crates/flashblocks/src/cache/confirm.rs +++ b/crates/flashblocks/src/cache/confirm.rs @@ -1,4 +1,4 @@ -use crate::cache::CachedTxInfo; +use crate::CachedTxInfo; use std::collections::{BTreeMap, HashMap}; use alloy_consensus::transaction::TxHashRef; @@ -119,8 +119,11 @@ impl ConfirmCache { } /// Returns the cached transaction info for the given tx hash, if present. - pub fn get_tx_info(&self, tx_hash: &TxHash) -> Option> { - self.tx_index.get(tx_hash).cloned() + /// Returns the cached transaction info for the given tx hash, if present. + pub fn get_tx_info(&self, tx_hash: &TxHash) -> Option<(CachedTxInfo, BlockAndReceipts)> { + let tx_info = self.tx_index.get(tx_hash).cloned()?; + let block = self.get_block_by_number(tx_info.block_number)?; + Some((tx_info, block)) } /// Returns `true` if the cache contains a block with the given hash. diff --git a/crates/flashblocks/src/cache/mod.rs b/crates/flashblocks/src/cache/mod.rs index 6c0209e1..43c29735 100644 --- a/crates/flashblocks/src/cache/mod.rs +++ b/crates/flashblocks/src/cache/mod.rs @@ -12,6 +12,7 @@ use std::sync::Arc; use tracing::*; use alloy_primitives::{TxHash, B256}; +use alloy_rpc_types_eth::{BlockId, BlockNumberOrTag}; use reth_primitives_traits::{NodePrimitives, ReceiptTy}; use reth_rpc_eth_types::block::BlockAndReceipts; @@ -66,66 +67,43 @@ impl FlashblockStateCache { self.inner.read().confirm_height } - /// Returns the current pending height. - pub fn get_pending_height(&self) -> u64 { - let inner = self.inner.read(); - inner.pending_cache.as_ref().map_or(inner.confirm_height, |p| p.get_height()) + /// Returns the current pending height, if any. + pub fn get_pending_height(&self) -> Option { + self.inner.read().pending_cache.as_ref().map(|p| p.get_height()) + } + + pub fn get_rpc_block_by_id(&self, block_id: Option) -> Option> { + match block_id.unwrap_or(BlockId::Number(BlockNumberOrTag::Latest)) { + BlockId::Number(id) => self.get_rpc_block(id), + BlockId::Hash(hash) => self.get_block_by_hash(&hash.block_hash), + } + } + + /// Returns the current pending block and receipts, if any. + pub fn get_rpc_block(&self, block_id: BlockNumberOrTag) -> Option> { + match block_id { + BlockNumberOrTag::Pending => self.inner.read().get_pending_block(), + BlockNumberOrTag::Latest => self.inner.read().get_confirmed_block(), + BlockNumberOrTag::Number(num) => self.get_block_by_number(num), + _ => None, + } } /// Returns the block for the given block number, if cached. pub fn get_block_by_number(&self, num: u64) -> Option> { - self.inner.read().confirm_cache.get_block_by_number(num) + self.inner.read().get_block_by_number(num) } /// Returns the confirmed block for the given block hash, if cached. pub fn get_block_by_hash(&self, hash: &B256) -> Option> { - self.inner.read().confirm_cache.get_block_by_hash(hash) + self.inner.read().get_block_by_hash(hash) } - // --- Pending block --- - - /// Returns the current pending block and receipts, if any. - pub fn get_pending_block(&self) -> Option> { - self.inner.read().pending_cache.as_ref().map(|p| p.get_block()) - } - - // --- Transaction/receipt lookup (pending + confirm) --- - /// Looks up cached transaction info by hash: pending sequence first, then /// confirm cache. Returns `None` if the tx is not in either cache layer. - pub fn get_tx_info(&self, tx_hash: &TxHash) -> Option> { + pub fn get_tx_info(&self, tx_hash: &TxHash) -> Option<(CachedTxInfo, BlockAndReceipts)> { self.inner.read().get_tx_info(tx_hash) } - - // --- Hash/number mapping --- - - /// Returns the block hash for the given block number, if cached in the - /// confirm cache. - pub fn get_block_hash(&self, num: u64) -> Option { - self.inner.read().confirm_cache.hash_for_number(num) - } - - /// Returns the block number for the given block hash, if cached in the - /// confirm cache. - pub fn get_block_number(&self, hash: &B256) -> Option { - self.inner.read().confirm_cache.number_for_hash(hash) - } - - // --- Range queries (for `eth_getLogs`) --- - - /// Returns all cached confirmed blocks in the inclusive range `[start..=end]`. - /// Blocks not present in the cache are skipped (the caller must fill gaps - /// from the provider). - pub fn get_blocks_in_range(&self, start: u64, end: u64) -> Vec> { - let inner = self.inner.read(); - let mut result = Vec::new(); - for num in start..=end { - if let Some(bar) = inner.confirm_cache.get_block_by_number(num) { - result.push(bar); - } - } - result - } } // FlashblockStateCache state mutation interfaces. @@ -139,10 +117,10 @@ impl FlashblockStateCache { /// If the pending sequence to be updated is the same as the current pending /// sequence, it will replace the existing with the incoming pending sequence. /// - /// Note that this state update is fallible as it detects potential reorgs, and - /// triggers cache flush on invalidate entries. An entry is invalidated if the - /// incoming pending sequence height is not the next pending height or current - /// pending height. + /// Note that this state update is fallible if something goes really wrong here + /// as it detects potential reorgs and flashblocks state cache pollution. An entry + /// is invalidated if the incoming pending sequence height is not the next pending + /// height or current pending height. pub fn handle_pending_sequence( &self, pending_sequence: PendingSequence, @@ -150,10 +128,15 @@ impl FlashblockStateCache { self.inner.write().handle_pending_sequence(pending_sequence) } - /// Handles a canonical block commit by flushing stale confirmed entries and - /// the pending state if it matches the committed block. + /// Handles a canonical block committed to the canonical chainstate. /// - /// If reorg flag is set, the flashblocks state cache will be default be flushed. + /// This method will flush the confirm cache up to the canonical block height and + /// the pending state if it matches the committed block to ensure flashblocks state + /// cache memory does not grow unbounded. + /// + /// It also detects chainstate re-orgs (set with re-org arg flag) and flashblocks + /// state cache pollution. By default once error is detected, we will automatically + /// flush the flashblocks state cache. pub fn handle_canonical_block(&self, block_number: u64, reorg: bool) { self.inner.write().handle_canonical_block(block_number, reorg) } @@ -166,8 +149,8 @@ struct FlashblockStateCacheInner { pending_cache: Option>, /// Cache of confirmed flashblock sequences ahead of the canonical chain. confirm_cache: ConfirmCache, - /// The highest confirmed block height of from both the confirm cache or - /// the pending cache. + /// Highest confirmed block height in the confirm cache. If flashblocks state cache + /// is uninitialized, the confirm height is set to 0. confirm_height: u64, } @@ -176,15 +159,6 @@ impl FlashblockStateCacheInner { Self { pending_cache: None, confirm_cache: ConfirmCache::new(), confirm_height: 0 } } - /// Looks up cached transaction info by hash: pending sequence first, then - /// confirm cache. Returns `None` if the tx is not in either cache layer. - fn get_tx_info(&self, tx_hash: &TxHash) -> Option> { - self.pending_cache - .as_ref() - .and_then(|p| p.get_tx_info(tx_hash)) - .or_else(|| self.confirm_cache.get_tx_info(tx_hash)) - } - /// Handles flushing a newly confirmed block to the confirm cache. Note that /// this state update is fallible as it detects potential reorgs, and triggers /// cache flush on invalidate entries. @@ -228,7 +202,7 @@ impl FlashblockStateCacheInner { "polluted state cache - trying to advance pending tip but no current pending" ) })?; - self.handle_confirmed_block(expected_height, sequence.get_block())?; + self.handle_confirmed_block(expected_height, sequence.get_block_and_receipts())?; self.pending_cache = Some(pending_sequence); } else if pending_height == expected_height { // Replace the existing pending sequence @@ -271,4 +245,37 @@ impl FlashblockStateCacheInner { self.pending_cache = None; self.confirm_cache.clear(); } + + pub fn get_confirmed_block(&self) -> Option> { + self.get_block_by_number(self.confirm_height) + } + + pub fn get_pending_block(&self) -> Option> { + self.pending_cache.as_ref().map(|p| p.get_block_and_receipts()) + } + + pub fn get_block_by_number(&self, num: u64) -> Option> { + if let Some(pending_sequence) = self.pending_cache.as_ref() + && pending_sequence.get_height() == num + { + return Some(pending_sequence.get_block_and_receipts()); + } + self.confirm_cache.get_block_by_number(num) + } + + pub fn get_block_by_hash(&self, hash: &B256) -> Option> { + if let Some(pending_sequence) = self.pending_cache.as_ref() + && pending_sequence.get_hash() == *hash + { + return Some(pending_sequence.get_block_and_receipts()); + } + self.confirm_cache.get_block_by_hash(hash) + } + + fn get_tx_info(&self, tx_hash: &TxHash) -> Option<(CachedTxInfo, BlockAndReceipts)> { + self.pending_cache + .as_ref() + .and_then(|p| p.get_tx_info(tx_hash)) + .or_else(|| self.confirm_cache.get_tx_info(tx_hash)) + } } diff --git a/crates/flashblocks/src/cache/pending.rs b/crates/flashblocks/src/cache/pending.rs index 8ebb8869..249251bc 100644 --- a/crates/flashblocks/src/cache/pending.rs +++ b/crates/flashblocks/src/cache/pending.rs @@ -40,18 +40,25 @@ impl PendingSequence { Self { pending, tx_index, cached_reads, block_hash, parent_hash, last_flashblock_index } } + pub fn get_hash(&self) -> B256 { + self.block_hash + } + pub fn get_height(&self) -> u64 { self.pending.block().number() } - pub fn get_block(&self) -> BlockAndReceipts { + pub fn get_block_and_receipts(&self) -> BlockAndReceipts { self.pending.to_block_and_receipts() } /// Returns the cached transaction info for the given tx hash, if present /// in the pending sequence. - pub fn get_tx_info(&self, tx_hash: &TxHash) -> Option> { - self.tx_index.get(tx_hash).cloned() + pub fn get_tx_info(&self, tx_hash: &TxHash) -> Option<(CachedTxInfo, BlockAndReceipts)> { + self.tx_index + .get(tx_hash) + .cloned() + .map(|tx_info| (tx_info, self.pending.to_block_and_receipts())) } } diff --git a/crates/flashblocks/src/lib.rs b/crates/flashblocks/src/lib.rs index 25d8ee26..20bd2ec7 100644 --- a/crates/flashblocks/src/lib.rs +++ b/crates/flashblocks/src/lib.rs @@ -10,7 +10,7 @@ mod ws; #[cfg(test)] mod test_utils; -pub use cache::PendingSequence; +pub use cache::{CachedTxInfo, FlashblockStateCache, PendingSequence}; pub use service::FlashblocksRpcService; pub use subscription::FlashblocksPubSub; pub use ws::WsFlashBlockStream; diff --git a/crates/rpc/src/eth.rs b/crates/rpc/src/eth.rs index 704c9e45..13d84c81 100644 --- a/crates/rpc/src/eth.rs +++ b/crates/rpc/src/eth.rs @@ -1,797 +1,453 @@ -//! Eth API override module for flashblocks RPC. -//! -//! Provides `EthApiOverride` — a jsonrpsee `#[rpc]` trait that overrides a -//! subset of `eth_*` methods to serve flashblocks data from the -//! [`FlashblockStateCache`] alongside canonical chain data from the inner -//! `Eth` API. -//! -//! Also provides `XlayerRpcExtApi` — a separate `#[rpc]` trait that exposes -//! X Layer-specific methods like `eth_flashblocksEnabled`. -//! -//! The override handler checks the flashblocks cache first for confirmed and -//! pending blocks, then falls back to the canonical `eth_api` for all other -//! queries. For transaction/receipt lookups, canonical is checked **first** to -//! avoid a race condition where the cache hasn't been cleared yet after a -//! canonical block commit. +use crate::helper::{ + to_block_receipts, to_rpc_block, to_rpc_receipt, to_rpc_transaction_from_bar_and_index, +}; +use jsonrpsee::{ + core::{async_trait, RpcResult}, + proc_macros::rpc, +}; +use tracing::*; use alloy_consensus::BlockHeader; use alloy_eips::{BlockId, BlockNumberOrTag}; -use alloy_primitives::{Address, Bytes, TxHash, U256}; +use alloy_primitives::{Address, Bytes, TxHash, B256, U256}; use alloy_rpc_types_eth::{ state::{EvmOverrides, StateOverride}, - BlockOverrides, Filter, Log, TransactionInfo, -}; -use jsonrpsee::{ - core::{async_trait, RpcResult}, - proc_macros::rpc, + BlockOverrides, Filter, Index, Log, TransactionInfo, }; use op_alloy_network::Optimism; +use op_alloy_rpc_types::OpTransactionRequest; + use reth_optimism_primitives::OpPrimitives; +use reth_optimism_rpc::eth::OpEthApi; use reth_primitives_traits::{BlockBody, NodePrimitives, SignerRecoverable}; use reth_rpc::eth::EthFilter; -use reth_rpc_convert::{RpcConvert, RpcTransaction}; +use reth_rpc_convert::RpcTransaction; use reth_rpc_eth_api::{ helpers::{EthBlocks, EthCall, EthState, EthTransactions, FullEthApi}, - EthApiTypes, EthFilterApiServer, RpcBlock, RpcReceipt, + EthApiServer, EthApiTypes, RpcBlock, RpcReceipt, }; -use tracing::debug; use xlayer_flashblocks::FlashblockStateCache; -// --------------------------------------------------------------------------- -// EthApiOverride — flashblocks `eth_*` method overrides -// --------------------------------------------------------------------------- - -/// Eth API override trait for flashblocks integration. -/// -/// Methods in this trait override the default `eth_*` JSON-RPC namespace -/// handlers when flashblocks are active. They are registered via -/// `add_or_replace_if_module_configured` to replace the corresponding -/// default implementations. +/// Eth API override for flashblocks RPC integration. #[cfg_attr(not(test), rpc(server, namespace = "eth"))] #[cfg_attr(test, rpc(server, client, namespace = "eth"))] -pub trait EthApiOverride { - // --- Block queries --- - - /// Returns the current block number, accounting for confirmed flashblocks. +pub trait FlashblocksEthApiOverride { + // ----------------- Block apis ----------------- + /// Returns the current block number, with the flashblocks state cache overlay. #[method(name = "blockNumber")] async fn block_number(&self) -> RpcResult; - /// Returns a block by number, with flashblocks support for pending/confirmed. + /// Returns block by number, with the flashblock state cache overlay support for pending and + /// confirmed blocks. #[method(name = "getBlockByNumber")] - async fn get_block_by_number( + async fn block_by_number( &self, number: BlockNumberOrTag, full: bool, ) -> RpcResult>>; - /// Returns a block by hash, checking flashblocks confirm cache first. + /// Returns block by block hash, with the flashblock state cache overlay support for pending + /// and confirmed blocks. #[method(name = "getBlockByHash")] - async fn get_block_by_hash( + async fn block_by_hash(&self, hash: B256, full: bool) -> RpcResult>>; + + /// Returns all the receipts in a block by block number, with the flashblock state cache + /// overlay support for pending and confirmed blocks. + #[method(name = "getBlockReceipts")] + async fn block_receipts( &self, - hash: alloy_primitives::B256, - full: bool, - ) -> RpcResult>>; + block_id: BlockNumberOrTag, + ) -> RpcResult>>>; - /// Returns the transaction count for a block by number. + /// Returns the number of transactions in a block by block number, with the flashblock state + /// cache overlay support for pending and confirmed blocks. #[method(name = "getBlockTransactionCountByNumber")] - async fn get_block_transaction_count_by_number( + async fn block_transaction_count_by_number( &self, number: BlockNumberOrTag, ) -> RpcResult>; - /// Returns the transaction count for a block by hash. + /// Returns the number of transactions in a block by block hash, with the flashblock state + /// cache overlay support for pending and confirmed blocks. #[method(name = "getBlockTransactionCountByHash")] - async fn get_block_transaction_count_by_hash( - &self, - hash: alloy_primitives::B256, - ) -> RpcResult>; + async fn block_transaction_count_by_hash(&self, hash: B256) -> RpcResult>; - /// Returns all receipts for a block. - #[method(name = "getBlockReceipts")] - async fn get_block_receipts( - &self, - block_id: BlockNumberOrTag, - ) -> RpcResult>>>; - - // --- Transaction queries --- - - /// Returns a transaction by hash (canonical-first to avoid race conditions). + // ----------------- Transaction apis ----------------- + /// Returns the information about a transaction requested by transaction hash, with the + /// flashblock state cache overlay support for pending and confirmed blocks. #[method(name = "getTransactionByHash")] - async fn get_transaction_by_hash( + async fn transaction_by_hash( &self, hash: TxHash, ) -> RpcResult>>; - /// Returns a transaction by block hash and index. + /// Returns the EIP-2718 encoded transaction if it exists, with the flashblock state cache + /// overlay support for pending and confirmed blocks. + #[method(name = "getRawTransactionByHash")] + async fn raw_transaction_by_hash(&self, hash: B256) -> RpcResult>; + + /// Returns the receipt of a transaction by transaction hash, with the flashblock state cache + /// overlay support for pending and confirmed blocks. + #[method(name = "getTransactionReceipt")] + async fn transaction_receipt(&self, hash: TxHash) -> RpcResult>>; + + /// Returns information about a raw transaction by block hash and transaction index position, + /// with the flashblock state cache overlay support for pending and confirmed blocks. #[method(name = "getTransactionByBlockHashAndIndex")] - async fn get_transaction_by_block_hash_and_index( + async fn transaction_by_block_hash_and_index( &self, - block_hash: alloy_primitives::B256, - index: alloy_eips::BlockNumberOrTag, + block_hash: B256, + index: Index, ) -> RpcResult>>; - /// Returns a transaction by block number and index. + /// Returns information about a transaction by block number and transaction index position, + /// with the flashblock state cache overlay support for pending and confirmed blocks. #[method(name = "getTransactionByBlockNumberAndIndex")] - async fn get_transaction_by_block_number_and_index( + async fn transaction_by_block_number_and_index( &self, - block_number: BlockNumberOrTag, - index: alloy_eips::BlockNumberOrTag, + number: BlockNumberOrTag, + index: Index, ) -> RpcResult>>; - /// Returns a transaction receipt (canonical-first to avoid race conditions). - #[method(name = "getTransactionReceipt")] - async fn get_transaction_receipt( + /// Returns information about a raw transaction by block hash and transaction index position, + /// with the flashblock state cache overlay support for pending and confirmed blocks. + #[method(name = "getRawTransactionByBlockHashAndIndex")] + async fn raw_transaction_by_block_hash_and_index( &self, - hash: TxHash, - ) -> RpcResult>>; + hash: B256, + index: Index, + ) -> RpcResult>; + + /// Returns information about a raw transaction by block number and transaction index position, + /// with the flashblock state cache overlay support for pending and confirmed blocks. + #[method(name = "getRawTransactionByBlockNumberAndIndex")] + async fn raw_transaction_by_block_number_and_index( + &self, + number: BlockNumberOrTag, + index: Index, + ) -> RpcResult>; + + /// Sends a signed transaction and awaits the transaction receipt, with the flashblock state cache + /// overlay support for pending and confirmed blocks. + /// + /// This will return a timeout error if the transaction isn't included within some time period. + #[method(name = "sendRawTransactionSync")] + async fn send_raw_transaction_sync(&self, bytes: Bytes) -> RpcResult; + + // ----------------- State apis ----------------- + /// Executes a new message call immediately without creating a transaction on the block chain, + /// with the flashblock state cache overlay support for pending and confirmed block states. + #[method(name = "call")] + async fn call( + &self, + transaction: OpTransactionRequest, + block_number: Option, + state_overrides: Option, + block_overrides: Option>, + ) -> RpcResult; - // --- State queries --- + /// Generates and returns an estimate of how much gas is necessary to allow the transaction to + /// complete, with the flashblock state cache overlay support for pending and confirmed block + /// states. + #[method(name = "estimateGas")] + async fn estimate_gas( + &self, + transaction: OpTransactionRequest, + block_number: Option, + overrides: Option, + ) -> RpcResult; + /// Returns the balance of the account of given address, with the flashblock state cache + /// overlay support for pending and confirmed block states. - /// Returns account balance, with flashblocks support for pending state. #[method(name = "getBalance")] - async fn get_balance(&self, address: Address, block_number: Option) - -> RpcResult; + async fn balance(&self, address: Address, block_number: Option) -> RpcResult; - /// Returns the transaction count (nonce) for an address. + /// Returns the number of transactions sent from an address at given block number, with the + /// flashblock state cache overlay support for pending and confirmed block states. #[method(name = "getTransactionCount")] - async fn get_transaction_count( + async fn transaction_count( &self, address: Address, block_number: Option, ) -> RpcResult; - /// Returns the code at a given address. + /// Returns code at a given address at given block number, with the flashblock state cache + /// overlay support for pending and confirmed block states. #[method(name = "getCode")] async fn get_code(&self, address: Address, block_number: Option) -> RpcResult; - /// Returns the storage value at a given address and slot. + /// Returns the value from a storage position at a given address, with the flashblock state + /// cache overlay support for pending and confirmed block states. #[method(name = "getStorageAt")] - async fn get_storage_at( + async fn storage_at( &self, address: Address, slot: U256, block_number: Option, - ) -> RpcResult; - - /// Executes a call with flashblock state support. - #[method(name = "call")] - async fn call( - &self, - transaction: alloy_rpc_types_eth::TransactionRequest, - block_number: Option, - state_overrides: Option, - block_overrides: Option>, - ) -> RpcResult; - - /// Estimates gas with flashblock state support. - #[method(name = "estimateGas")] - async fn estimate_gas( - &self, - transaction: alloy_rpc_types_eth::TransactionRequest, - block_number: Option, - overrides: Option, - ) -> RpcResult; - - // --- Logs --- - - /// Returns logs matching the filter, including pending flashblock logs. - #[method(name = "getLogs")] - async fn get_logs(&self, filter: Filter) -> RpcResult>; + ) -> RpcResult; } /// Extended Eth API with flashblocks cache overlay. -/// -/// Wraps the canonical `eth_api` and `eth_filter` alongside a -/// [`FlashblockStateCache`] to serve flashblocks data for confirmed and -/// pending blocks while delegating canonical chain queries to the underlying -/// `Eth` API. #[derive(Debug)] pub struct XLayerEthApiExt { - eth_api: Eth, - eth_filter: EthFilter, - flash_cache: FlashblockStateCache, + eth_api: OpEthApi, + flashblocks_state: FlashblockStateCache, } impl XLayerEthApiExt { /// Creates a new [`XLayerEthApiExt`]. pub fn new( - eth_api: Eth, - eth_filter: EthFilter, - flash_cache: FlashblockStateCache, + eth_api: OpEthApi, + flashblocks_state: FlashblockStateCache, ) -> Self { - Self { eth_api, eth_filter, flash_cache } + Self { eth_api, flashblocks_state } } } #[async_trait] -impl EthApiOverrideServer for XLayerEthApiExt +impl FlashblocksEthApiOverrideServer for XLayerEthApiExt where Eth: FullEthApi + Send + Sync + 'static, jsonrpsee_types::error::ErrorObject<'static>: From, { + // ----------------- Block apis ----------------- + /// Handler for: `eth_blockNumber` async fn block_number(&self) -> RpcResult { - // The cache's confirm height is always >= canonical height (it tracks - // the max of confirm cache tip and canonical tip). Use the cache's - // pending height (which accounts for the in-progress flashblock - // sequence) as the reported block number. - let height = self.flash_cache.get_pending_height(); - Ok(U256::from(height)) + trace!(target: "rpc::eth", "Serving eth_blockNumber"); + let fb_height = self.flashblocks_state.get_confirm_height(); + let canon_height = self.eth_api.block_number().await?; + Ok(U256::from(std::cmp::max(fb_height, canon_height))) } - async fn get_block_by_number( + /// Handler for: `eth_getBlockByNumber` + async fn block_by_number( &self, number: BlockNumberOrTag, full: bool, ) -> RpcResult>> { - debug!(target: "xlayer::rpc", ?number, "eth_getBlockByNumber"); - - if number.is_pending() { - // Return pending flashblock if available - if let Some(bar) = self.flash_cache.get_pending_block() { - return bar_to_rpc_block::(&bar, full, self.eth_api.converter()) - .map(Some) - .map_err(Into::into); - } - // No pending flashblock — treat as latest - return EthBlocks::rpc_block(&self.eth_api, BlockNumberOrTag::Latest.into(), full) - .await + trace!(target: "rpc::eth", ?number, ?full, "Serving eth_getBlockByNumber"); + if let Some(bar) = self.flashblocks_state.get_rpc_block(number) { + return to_rpc_block::(&bar, full, self.eth_api.converter()) + .map(Some) .map_err(Into::into); } - - // Check confirm cache for specific block numbers - if let BlockNumberOrTag::Number(num) = number { - if let Some(bar) = self.flash_cache.get_block_by_number(num) { - return bar_to_rpc_block::(&bar, full, self.eth_api.converter()) - .map(Some) - .map_err(Into::into); - } - } - - // Delegate to canonical - EthBlocks::rpc_block(&self.eth_api, number.into(), full).await.map_err(Into::into) + self.eth_api.block_by_number(number, full).await } - async fn get_block_by_hash( - &self, - hash: alloy_primitives::B256, - full: bool, - ) -> RpcResult>> { - debug!(target: "xlayer::rpc", %hash, "eth_getBlockByHash"); - - // Check confirm cache first - if let Some(bar) = self.flash_cache.get_block_by_hash(&hash) { - return bar_to_rpc_block::(&bar, full, self.eth_api.converter()) + /// Handler for: `eth_getBlockByHash` + async fn block_by_hash(&self, hash: B256, full: bool) -> RpcResult>> { + trace!(target: "rpc::eth", ?hash, ?full, "Serving eth_getBlockByHash"); + if let Some(bar) = self.flashblocks_state.get_block_by_hash(&hash) { + return to_rpc_block::(&bar, full, self.eth_api.converter()) .map(Some) .map_err(Into::into); } - - // Delegate to canonical - EthBlocks::rpc_block(&self.eth_api, hash.into(), full).await.map_err(Into::into) + self.eth_api.block_by_hash(hash, full).await } - async fn get_block_transaction_count_by_number( + /// Handler for: `eth_getBlockReceipts` + async fn block_receipts( &self, - number: BlockNumberOrTag, - ) -> RpcResult> { - debug!(target: "xlayer::rpc", ?number, "eth_getBlockTransactionCountByNumber"); - - if number.is_pending() { - if let Some(bar) = self.flash_cache.get_pending_block() { - let count = bar.block.body().transaction_count(); - return Ok(Some(U256::from(count))); - } - return EthBlocks::block_transaction_count( - &self.eth_api, - BlockNumberOrTag::Latest.into(), - ) - .await - .map(|opt| opt.map(U256::from)) - .map_err(Into::into); - } - - if let BlockNumberOrTag::Number(num) = number { - if let Some(bar) = self.flash_cache.get_block_by_number(num) { - let count = bar.block.body().transaction_count(); - return Ok(Some(U256::from(count))); - } + block_id: BlockNumberOrTag, + ) -> RpcResult>>> { + trace!(target: "rpc::eth", ?block_id, "Serving eth_getBlockReceipts"); + if let Some(bar) = self.flashblocks_state.get_rpc_block(block_id) { + return to_block_receipts::(&bar, self.eth_api.converter()) + .map(Some) + .map_err(Into::into); } - - EthBlocks::block_transaction_count(&self.eth_api, number.into()) - .await - .map(|opt| opt.map(U256::from)) - .map_err(Into::into) + self.eth_api.block_receipts(block_id).await } - async fn get_block_transaction_count_by_hash( + /// Handler for: `eth_getBlockTransactionCountByNumber` + async fn block_transaction_count_by_number( &self, - hash: alloy_primitives::B256, + number: BlockNumberOrTag, ) -> RpcResult> { - debug!(target: "xlayer::rpc", %hash, "eth_getBlockTransactionCountByHash"); - - if let Some(bar) = self.flash_cache.get_block_by_hash(&hash) { + trace!(target: "rpc::eth", ?number, "Serving eth_getBlockTransactionCountByNumber"); + if let Some(bar) = self.flashblocks_state.get_rpc_block(number) { let count = bar.block.body().transaction_count(); return Ok(Some(U256::from(count))); } - - EthBlocks::block_transaction_count(&self.eth_api, hash.into()) - .await - .map(|opt| opt.map(U256::from)) - .map_err(Into::into) + self.eth_api.block_transaction_count_by_number(number).await } - async fn get_block_receipts( - &self, - block_id: BlockNumberOrTag, - ) -> RpcResult>>> { - debug!(target: "xlayer::rpc", ?block_id, "eth_getBlockReceipts"); - - let bar = if block_id.is_pending() { - self.flash_cache.get_pending_block() - } else if let BlockNumberOrTag::Number(num) = block_id { - self.flash_cache.get_block_by_number(num) - } else { - None - }; - - if let Some(bar) = bar { - let receipts = - bar_to_rpc_receipts::(&bar, self.eth_api.converter()).map_err(Into::into)?; - return Ok(Some(receipts)); + /// Handler for: `eth_getUncleCountByBlockHash` + async fn block_transaction_count_by_hash(&self, hash: B256) -> RpcResult> { + trace!(target: "rpc::eth", ?hash, "Serving eth_getBlockTransactionCountByHash"); + if let Some(bar) = self.flashblocks_state.get_block_by_hash(&hash) { + let count = bar.block.body().transaction_count(); + return Ok(Some(U256::from(count))); } - - // Delegate to canonical — use the block_receipts helper from the eth_api - // For now, delegate to the canonical handler directly - // TODO: Once reth exposes a direct block_receipts helper, use it - Ok(None) + self.eth_api.block_transaction_count_by_hash(hash).await } - async fn get_transaction_by_hash( + // ----------------- Transaction apis ----------------- + /// Handler for: `eth_getTransactionByHash` + async fn transaction_by_hash( &self, hash: TxHash, ) -> RpcResult>> { - debug!(target: "xlayer::rpc", %hash, "eth_getTransactionByHash"); - - // Check canonical chain FIRST to avoid race condition where flashblocks - // cache hasn't been cleared yet after canonical block commit - if let Some(tx_source) = EthTransactions::transaction_by_hash(&self.eth_api, hash).await? { - let rpc_tx = - tx_source.into_transaction(self.eth_api.converter()).map_err(Into::into)?; - return Ok(Some(rpc_tx)); + trace!(target: "rpc::eth", ?hash, "Serving eth_getTransactionByHash"); + if let Some((info, bar)) = self.flashblocks_state.get_tx_info(&hash) { + return Ok(Some(to_rpc_transaction::(&info, &bar, self.eth_api.converter())?)); } + self.eth_api.transaction_by_hash(hash).await + } - // Fall back to flashblocks cache - if let Some(info) = self.flash_cache.get_tx_info(&hash) { - let tx_info = TransactionInfo { - hash: Some(hash), - index: Some(info.tx_index), - block_hash: Some(info.block_hash), - block_number: Some(info.block_number), - base_fee: None, - }; - let recovered = reth_primitives_traits::Recovered::new_unchecked( - info.tx.clone(), - info.tx.recover_signer().unwrap_or_default(), - ); - let rpc_tx = self.eth_api.converter().fill(recovered, tx_info).map_err(Into::into)?; - return Ok(Some(rpc_tx)); + /// Handler for: `eth_getRawTransactionByHash` + async fn raw_transaction_by_hash(&self, hash: B256) -> RpcResult> { + trace!(target: "rpc::eth", ?hash, "Serving eth_getRawTransactionByHash"); + if let Some((info, _)) = self.flashblocks_state.get_tx_info(&hash) { + return Ok(Some(info.tx.encoded_2718().into())); } + self.eth_api.raw_transaction_by_hash(hash).await + } - Ok(None) + /// Handler for: `eth_getTransactionReceipt` + async fn transaction_receipt(&self, hash: TxHash) -> RpcResult>> { + trace!(target: "rpc::eth", ?hash, "Serving eth_getTransactionReceipt"); + if let Some((info, bar)) = self.flashblocks_state.get_tx_info(&hash) { + return Ok(Some(to_rpc_receipt::(&info, &bar, self.eth_api.converter())?)); + } + self.eth_api.transaction_receipt(hash).await } - async fn get_transaction_by_block_hash_and_index( + /// Handler for: `eth_getTransactionByBlockHashAndIndex` + async fn transaction_by_block_hash_and_index( &self, - block_hash: alloy_primitives::B256, - index: alloy_eips::BlockNumberOrTag, + block_hash: B256, + index: Index, ) -> RpcResult>> { - debug!(target: "xlayer::rpc", %block_hash, ?index, "eth_getTransactionByBlockHashAndIndex"); - - let tx_index = match index { - BlockNumberOrTag::Number(n) => n, - _ => return Ok(None), - }; - - if let Some(bar) = self.flash_cache.get_block_by_hash(&block_hash) { - return get_tx_by_index_from_bar::(&bar, tx_index, self.eth_api.converter()) - .map_err(Into::into); + trace!(target: "rpc::eth", ?hash, ?index, "Serving eth_getRawTransactionByBlockHashAndIndex"); + if let Some(bar) = self.flashblocks_state.get_block_by_hash(&block_hash) { + return to_rpc_transaction_from_bar_and_index::( + &bar, + index.into(), + self.eth_api.converter(), + ) + .map_err(Into::into); } - - // Delegate to canonical - // The canonical eth_api doesn't expose this directly in a helper trait, - // so we just return None for non-cached blocks and let the main handler - // deal with it. The override will be registered with add_or_replace, so - // this only gets called for our override. - Ok(None) + self.eth_api.transaction_by_block_hash_and_index(block_hash, index).await } - async fn get_transaction_by_block_number_and_index( + /// Handler for: `eth_getTransactionByBlockNumberAndIndex` + async fn transaction_by_block_number_and_index( &self, - block_number: BlockNumberOrTag, - index: alloy_eips::BlockNumberOrTag, + number: BlockNumberOrTag, + index: Index, ) -> RpcResult>> { - debug!(target: "xlayer::rpc", ?block_number, ?index, "eth_getTransactionByBlockNumberAndIndex"); - - let tx_index = match index { - BlockNumberOrTag::Number(n) => n, - _ => return Ok(None), - }; - - let bar = if block_number.is_pending() { - self.flash_cache.get_pending_block() - } else if let BlockNumberOrTag::Number(num) = block_number { - self.flash_cache.get_block_by_number(num) - } else { - None - }; - - if let Some(bar) = bar { - return get_tx_by_index_from_bar::(&bar, tx_index, self.eth_api.converter()) - .map_err(Into::into); + trace!(target: "rpc::eth", ?number, ?index, "Serving eth_getTransactionByBlockNumberAndIndex"); + if let Some(bar) = self.flashblocks_state.get_rpc_block(number) { + return to_rpc_transaction_from_bar_and_index::( + &bar, + index.into(), + self.eth_api.converter(), + ) + .map_err(Into::into); } - - Ok(None) + self.eth_api.transaction_by_block_number_and_index(block_number, index).await } - async fn get_transaction_receipt( + /// Handler for: `eth_getRawTransactionByBlockHashAndIndex` + async fn raw_transaction_by_block_hash_and_index( &self, - hash: TxHash, - ) -> RpcResult>> { - debug!(target: "xlayer::rpc", %hash, "eth_getTransactionReceipt"); - - // Check canonical chain FIRST to avoid race condition - if let Some(canonical_receipt) = - EthTransactions::transaction_receipt(&self.eth_api, hash).await? + hash: B256, + index: Index, + ) -> RpcResult> { + trace!(target: "rpc::eth", ?hash, ?index, "Serving eth_getRawTransactionByBlockHashAndIndex"); + if let Some(bar) = self.flashblocks_state.get_block_by_hash(&hash) + && let Some(tx) = bar.block.body().transactions().nth(index.into()) { - return Ok(Some(canonical_receipt)); - } - - // Fall back to flashblocks cache - if let Some(info) = self.flash_cache.get_tx_info(&hash) { - let receipt = cached_tx_info_to_rpc_receipt::(&info, self.eth_api.converter()) - .map_err(Into::into)?; - return Ok(Some(receipt)); + return Ok(Some(tx.encoded_2718().into())); } - - Ok(None) - } - - // --- State queries (Phase 1: delegate to eth_api) --- - - async fn get_balance( - &self, - address: Address, - block_number: Option, - ) -> RpcResult { - // Phase 1: delegate entirely to eth_api - // Phase 2 will add pending state override from flashblocks cache - EthState::balance(&self.eth_api, address, block_number).await.map_err(Into::into) + self.eth_api.raw_transaction_by_block_hash_and_index(hash, index).await } - async fn get_transaction_count( + /// Handler for: `eth_getRawTransactionByBlockNumberAndIndex` + async fn raw_transaction_by_block_number_and_index( &self, - address: Address, - block_number: Option, - ) -> RpcResult { - EthState::transaction_count(&self.eth_api, address, block_number).await.map_err(Into::into) - } - - async fn get_code(&self, address: Address, block_number: Option) -> RpcResult { - EthState::get_code(&self.eth_api, address, block_number).await.map_err(Into::into) + number: BlockNumberOrTag, + index: Index, + ) -> RpcResult> { + trace!(target: "rpc::eth", ?number, ?index, "Serving eth_getRawTransactionByBlockNumberAndIndex"); + if let Some(bar) = self.flashblocks_state.get_rpc_block(number) + && let Some(tx) = bar.block.body().transactions().nth(index.into()) + { + return Ok(Some(tx.encoded_2718().into())); + } + self.eth_api.raw_transaction_by_block_number_and_index(hash, index).await } - async fn get_storage_at( - &self, - address: Address, - slot: U256, - block_number: Option, - ) -> RpcResult { - EthState::storage_at( - &self.eth_api, - address, - alloy_rpc_types_eth::JsonStorageKey(slot.into()), - block_number, - ) - .await - .map_err(Into::into) + /// Handler for: `eth_sendRawTransactionSync` + async fn send_raw_transaction_sync(&self, tx: Bytes) -> RpcResult> { + trace!(target: "rpc::eth", ?tx, "Serving eth_sendRawTransactionSync"); + // TODO: Implement + self.eth_api.send_raw_transaction_sync(tx).await } + // ----------------- State apis ----------------- + /// Handler for: `eth_call` async fn call( &self, - transaction: alloy_rpc_types_eth::TransactionRequest, + transaction: OpTransactionRequest, block_number: Option, state_overrides: Option, block_overrides: Option>, ) -> RpcResult { - // Phase 1: delegate entirely to eth_api - // Phase 2 will merge flashblocks state overrides for pending - EthCall::call( - &self.eth_api, - transaction, - block_number, - EvmOverrides::new(state_overrides, block_overrides), - ) - .await - .map_err(Into::into) + trace!(target: "rpc::eth", ?request, ?block_number, ?state_overrides, ?block_overrides, "Serving eth_call"); + // TODO: Implement state provider + self.eth_api.call(transaction, block_number, state_overrides, block_overrides).await } + /// Handler for: `eth_estimateGas` async fn estimate_gas( &self, - transaction: alloy_rpc_types_eth::TransactionRequest, + transaction: alloy_rpc_types_eth::OpTransactionRequest, block_number: Option, overrides: Option, ) -> RpcResult { - // Phase 1: delegate entirely to eth_api - let block_id = block_number.unwrap_or_default(); - EthCall::estimate_gas_at(&self.eth_api, transaction, block_id, overrides) - .await - .map_err(Into::into) + trace!(target: "rpc::eth", ?request, ?block_number, "Serving eth_estimateGas"); + // TODO: Implement state provider + self.eth_api.estimate_gas(transaction, block_number, overrides).await } - async fn get_logs(&self, filter: Filter) -> RpcResult> { - debug!(target: "xlayer::rpc", ?filter.address, "eth_getLogs"); - - // Check if this is a range query with pending toBlock - let (from_block, to_block) = match &filter.block_option { - alloy_rpc_types_eth::FilterBlockOption::Range { from_block, to_block } => { - (*from_block, *to_block) - } - _ => { - // Block hash queries or other formats — delegate to eth filter - return self.eth_filter.logs(filter).await; - } - }; - - // If toBlock is not pending, delegate to eth filter - if !matches!(to_block, Some(BlockNumberOrTag::Pending)) { - return self.eth_filter.logs(filter).await; + /// Handler for: `eth_getBalance` + async fn balance(&self, address: Address, block_number: Option) -> RpcResult { + trace!(target: "rpc::eth", ?address, ?block_number, "Serving eth_getBalance"); + if let Some(_bar) = self.flashblocks_state.get_rpc_block_by_id(block_number) { + // TODO: Implement state provider } - - // Mixed query: toBlock is pending — combine historical + pending logs - let mut all_logs = Vec::new(); - - // Get historical logs if fromBlock is not pending - if !matches!(from_block, Some(BlockNumberOrTag::Pending)) { - let mut historical_filter = filter.clone(); - historical_filter.block_option = alloy_rpc_types_eth::FilterBlockOption::Range { - from_block, - to_block: Some(BlockNumberOrTag::Latest), - }; - let historical_logs: Vec = self.eth_filter.logs(historical_filter).await?; - all_logs.extend(historical_logs); - } - - // Get pending logs from flashblocks cache - if let Some(pending_bar) = self.flash_cache.get_pending_block() { - let pending_logs = extract_logs_from_bar(&pending_bar, &filter); - // Dedup: skip logs already fetched in historical range - let historical_max_block = all_logs.last().and_then(|l| l.block_number); - for log in pending_logs { - if let Some(max_block) = historical_max_block { - if log.block_number.is_some_and(|n| n <= max_block) { - continue; - } - } - all_logs.push(log); - } - } - - Ok(all_logs) - } -} - -// --------------------------------------------------------------------------- -// Helper functions -// --------------------------------------------------------------------------- - -/// Converts a [`BlockAndReceipts`] into an RPC block. -fn bar_to_rpc_block>( - bar: &reth_rpc_eth_types::block::BlockAndReceipts, - full: bool, - converter: &Eth::RpcConvert, -) -> Result, Eth::Error> -where - Eth::Error: From<::Error>, -{ - bar.block - .clone_into_rpc_block( - full.into(), - |tx, tx_info| converter.fill(tx, tx_info), - |header, size| converter.convert_header(header, size), - ) - .map_err(Into::into) -} - -/// Converts all receipts from a [`BlockAndReceipts`] into RPC receipts. -fn bar_to_rpc_receipts>( - bar: &reth_rpc_eth_types::block::BlockAndReceipts, - converter: &Eth::RpcConvert, -) -> Result>, Eth::Error> -where - Eth::Error: From<::Error>, -{ - use alloy_consensus::transaction::TxHashRef; - use reth_rpc_convert::transaction::ConvertReceiptInput; - - let block_hash = bar.block.hash(); - let block_number = bar.block.number(); - - let txs = bar.block.body().transactions(); - let receipts = bar.receipts.as_ref(); - - let mut inputs = Vec::with_capacity(txs.len()); - let mut next_log_index = 0usize; - let mut prev_cumulative_gas = 0u64; - - for (idx, (tx, receipt)) in txs.iter().zip(receipts.iter()).enumerate() { - let gas_used = receipt.as_receipt().cumulative_gas_used - prev_cumulative_gas; - prev_cumulative_gas = receipt.as_receipt().cumulative_gas_used; - - let meta = reth_primitives_traits::TransactionMeta { - tx_hash: *tx.tx_hash(), - index: idx as u64, - block_hash, - block_number, - base_fee: bar.block.base_fee_per_gas(), - excess_blob_gas: bar.block.excess_blob_gas(), - timestamp: bar.block.timestamp(), - }; - - inputs.push(ConvertReceiptInput { - receipt: receipt.clone(), - tx: reth_primitives_traits::Recovered::new_unchecked( - tx, - tx.recover_signer().unwrap_or_default(), - ), - gas_used, - next_log_index, - meta, - }); - - next_log_index += receipt.as_receipt().logs.len(); + self.eth_api.balance(address, block_number).await } - converter.convert_receipts(inputs).map_err(Into::into) -} - -/// Converts a single [`CachedTxInfo`] into an RPC receipt. -fn cached_tx_info_to_rpc_receipt>( - info: &xlayer_flashblocks::cache::CachedTxInfo, - converter: &Eth::RpcConvert, -) -> Result, Eth::Error> -where - Eth::Error: From<::Error>, -{ - use alloy_consensus::transaction::TxHashRef; - use reth_rpc_convert::transaction::ConvertReceiptInput; - - let gas_used = info.receipt.as_receipt().cumulative_gas_used; - let meta = reth_primitives_traits::TransactionMeta { - tx_hash: *info.tx.tx_hash(), - index: info.tx_index, - block_hash: info.block_hash, - block_number: info.block_number, - base_fee: None, - excess_blob_gas: None, - timestamp: 0, - }; - - let input = ConvertReceiptInput { - receipt: info.receipt.clone(), - tx: reth_primitives_traits::Recovered::new_unchecked( - &info.tx, - info.tx.recover_signer().unwrap_or_default(), - ), - gas_used, - next_log_index: 0, - meta, - }; - - let mut receipts = converter.convert_receipts(vec![input]).map_err(Into::into)?; - Ok(receipts.remove(0)) -} - -/// Gets a transaction by index from a [`BlockAndReceipts`]. -fn get_tx_by_index_from_bar>( - bar: &reth_rpc_eth_types::block::BlockAndReceipts, - tx_index: u64, - converter: &Eth::RpcConvert, -) -> Result>, Eth::Error> -where - Eth::Error: From<::Error>, -{ - use alloy_consensus::transaction::TxHashRef; - - let txs = bar.block.body().transactions(); - let idx = tx_index as usize; - if idx >= txs.len() { - return Ok(None); - } - - let tx = &txs[idx]; - let block_hash = bar.block.hash(); - let block_number = bar.block.number(); - - let tx_info = TransactionInfo { - hash: Some(*tx.tx_hash()), - index: Some(tx_index), - block_hash: Some(block_hash), - block_number: Some(block_number), - base_fee: bar.block.base_fee_per_gas(), - }; - - let recovered = reth_primitives_traits::Recovered::new_unchecked( - tx.clone(), - tx.recover_signer().unwrap_or_default(), - ); - let rpc_tx = converter.fill(recovered, tx_info).map_err(Into::into)?; - Ok(Some(rpc_tx)) -} - -/// Extracts logs from a [`BlockAndReceipts`] that match the given filter. -fn extract_logs_from_bar( - bar: &reth_rpc_eth_types::block::BlockAndReceipts, - filter: &Filter, -) -> Vec { - use alloy_consensus::transaction::TxHashRef; - - let block_hash = bar.block.hash(); - let block_number = bar.block.number(); - - let mut logs = Vec::new(); - let mut log_index = 0u64; - - let txs = bar.block.body().transactions(); - let receipts = bar.receipts.as_ref(); - - for (tx_idx, (tx, receipt)) in txs.iter().zip(receipts.iter()).enumerate() { - for receipt_log in &receipt.as_receipt().logs { - // Check address filter - if !filter.address.matches_any(&receipt_log.address) { - log_index += 1; - continue; - } - - // Check topics filter - if !filter_matches_topics(&filter.topics, &receipt_log.topics()) { - log_index += 1; - continue; - } - - logs.push(Log { - inner: receipt_log.clone(), - block_hash: Some(block_hash), - block_number: Some(block_number), - block_timestamp: Some(bar.block.timestamp()), - transaction_hash: Some(*tx.tx_hash()), - transaction_index: Some(tx_idx as u64), - log_index: Some(log_index), - removed: false, - }); - log_index += 1; + /// Handler for: `eth_getTransactionCount` + async fn transaction_count( + &self, + address: Address, + block_number: Option, + ) -> RpcResult { + if let Some(_bar) = self.flashblocks_state.get_rpc_block_by_id(block_number) { + // TODO: Implement state provider } + self.eth_api.transaction_count(address, block_number).await } - logs -} + async fn get_code(&self, address: Address, block_number: Option) -> RpcResult { + self.eth_api.get_code(address, block_number).await + } -/// Checks if log topics match the filter topics. -fn filter_matches_topics( - filter_topics: &[alloy_rpc_types_eth::FilterSet], - log_topics: &[alloy_primitives::B256], -) -> bool { - for (i, filter_set) in filter_topics.iter().enumerate() { - if filter_set.is_empty() { - continue; - } - match log_topics.get(i) { - Some(topic) => { - if !filter_set.matches(topic) { - return false; - } - } - None => return false, + /// Handler for: `eth_getStorageAt` + async fn storage_at( + &self, + address: Address, + slot: U256, + block_number: Option, + ) -> RpcResult { + if let Some(_bar) = self.flashblocks_state.get_rpc_block_by_id(block_number) { + // TODO: Implement state provider } + self.eth_api.storage_at(address, slot, block_number).await } - true } diff --git a/crates/rpc/src/helper.rs b/crates/rpc/src/helper.rs new file mode 100644 index 00000000..d5879ccf --- /dev/null +++ b/crates/rpc/src/helper.rs @@ -0,0 +1,164 @@ +use alloy_consensus::TxReceipt; +use alloy_primitives::B256; +use alloy_rpc_types_eth::TransactionInfo; +use op_alloy_network::Optimism; + +use reth_optimism_primitives::OpPrimitives; +use reth_primitives_traits::{Recovered, SignerRecoverable, TransactionMeta}; +use reth_rpc_convert::{transaction::ConvertReceiptInput, RpcConvert, RpcTransaction}; +use reth_rpc_eth_api::{EthApiTypes, RpcBlock, RpcReceipt}; +use reth_rpc_eth_types::{block::BlockAndReceipts, utils::calculate_gas_used_and_next_log_index}; + +use xlayer_flashblocks::cache::CachedTxInfo; + +/// Converter for `TransactionMeta` +pub(crate) fn build_tx_meta( + bar: &BlockAndReceipts, + tx_hash: B256, + index: u64, +) -> TransactionMeta { + TransactionMeta { + tx_hash, + index, + block_hash: bar.block.hash(), + block_number: bar.block.number(), + base_fee: bar.block.base_fee_per_gas(), + excess_blob_gas: bar.block.excess_blob_gas(), + timestamp: bar.block.timestamp(), + } +} + +/// Converter for `TransactionInfo` +pub(crate) fn build_tx_info( + bar: &BlockAndReceipts, + tx_hash: B256, + index: u64, +) -> TransactionInfo { + TransactionInfo { + hash: Some(tx_hash), + index: Some(index), + block_hash: Some(bar.block.hash()), + block_number: Some(bar.block.number()), + base_fee: bar.block.base_fee_per_gas(), + } +} + +/// Converts a `BlockAndReceipts` into an RPC block. +pub(crate) fn to_rpc_block>( + bar: &BlockAndReceipts, + full: bool, + converter: &Eth::RpcConvert, +) -> Result, Eth::Error> +where + Eth::RpcConvert: RpcConvert, + Eth::Error: From<::Error>, +{ + Ok(bar.block.clone_into_rpc_block( + full.into(), + |tx, tx_info| converter.fill(tx, tx_info), + |header, size| converter.convert_header(header, size), + )?) +} + +/// Converts all receipts from a `BlockAndReceipts` into RPC receipts. +pub(crate) fn to_block_receipts>( + bar: &BlockAndReceipts, + converter: &Eth::RpcConvert, +) -> Result>, Eth::Error> +where + Eth::RpcConvert: RpcConvert, + Eth::Error: From<::Error>, +{ + let txs = bar.block.body().transactions(); + let senders = bar.block.senders(); + let receipts = bar.receipts.as_ref(); + + let mut prev_cumulative_gas = 0u64; + let mut next_log_index = 0usize; + + let inputs = txs + .iter() + .zip(senders.iter()) + .zip(receipts.iter()) + .enumerate() + .map(|(idx, ((tx, sender), receipt))| { + let gas_used = receipt.cumulative_gas_used() - prev_cumulative_gas; + prev_cumulative_gas = receipt.cumulative_gas_used(); + let logs_len = receipt.logs().len(); + + let meta = build_tx_meta(bar, tx.tx_hash(), idx as u64); + let input = ConvertReceiptInput { + tx: Recovered::new_unchecked(tx, *sender), + gas_used, + next_log_index, + meta, + receipt: receipt.clone(), + }; + + next_log_index += logs_len; + + input + }) + .collect::>(); + + Ok(converter.convert_receipts(inputs)?) +} + +/// Converts a single `CachedTxInfo` into an RPC receipt, using the full block receipts +/// from `BlockAndReceipts` to correctly calculate gas used and log index offsets. +pub(crate) fn to_rpc_receipt>( + info: &CachedTxInfo, + bar: &BlockAndReceipts, + converter: &Eth::RpcConvert, +) -> Result, Eth::Error> +where + Eth::RpcConvert: RpcConvert, + Eth::Error: From<::Error>, +{ + let (prev_cumulative_gas, next_log_index) = + calculate_gas_used_and_next_log_index(info.tx_index, bar.receipts.as_ref()); + + let meta = build_tx_meta(bar, info.tx.tx_hash(), info.tx_index); + let recovered = info.tx.try_into_recovered_unchecked()?; + Ok(converter + .convert_receipts(vec![ConvertReceiptInput { + tx: recovered.as_recovered_ref(), + gas_used: info.receipt.cumulative_gas_used() - prev_cumulative_gas, + next_log_index, + meta, + receipt: info.receipt.clone(), + }])? + .pop() + .unwrap()) +} + +/// Converts a `CachedTxInfo` and `BlockAndReceipts` into an RPC transaction. +pub(crate) fn to_rpc_transaction>( + info: &CachedTxInfo, + bar: &BlockAndReceipts, + converter: &Eth::RpcConvert, +) -> Result, Eth::Error> +where + Eth::RpcConvert: RpcConvert, + Eth::Error: From<::Error>, +{ + let tx_info = build_tx_info(bar, info.tx.tx_hash(), info.tx_index); + Ok(converter.fill(info.tx.try_into_recovered_unchecked()?, tx_info)?) +} + +/// Converts a `BlockAndReceipts` and transaction index into an RPC transaction. +pub(crate) fn to_rpc_transaction_from_bar_and_index>( + bar: &BlockAndReceipts, + index: usize, + converter: &Eth::RpcConvert, +) -> Result>, Eth::Error> +where + Eth::RpcConvert: RpcConvert, + Eth::Error: From<::Error>, +{ + if let Some((signer, tx)) = bar.block.transactions_with_sender().nth(index) { + let tx_info = build_tx_info(bar, tx.tx_hash(), index as u64); + return Ok(Some(converter.fill(tx.clone().with_signer(*signer), tx_info)?)); + } + Ok(None) +} diff --git a/crates/rpc/src/lib.rs b/crates/rpc/src/lib.rs index a11d836d..dcdb19fe 100644 --- a/crates/rpc/src/lib.rs +++ b/crates/rpc/src/lib.rs @@ -2,6 +2,7 @@ #![cfg_attr(docsrs, feature(doc_cfg))] pub mod eth; +pub mod helper; pub mod xlayer_ext; pub use eth::{EthApiOverrideServer, XLayerEthApiExt}; diff --git a/crates/rpc/src/xlayer_ext.rs b/crates/rpc/src/xlayer_ext.rs index 8ca4088a..bd9c50b6 100644 --- a/crates/rpc/src/xlayer_ext.rs +++ b/crates/rpc/src/xlayer_ext.rs @@ -2,9 +2,11 @@ use jsonrpsee::{ core::{async_trait, RpcResult}, proc_macros::rpc, }; + use reth_optimism_primitives::OpPrimitives; use reth_optimism_rpc::SequencerClient; -use xlayer_flashblocks::cache::FlashblockStateCache; + +use xlayer_flashblocks::FlashblockStateCache; /// Trait for accessing sequencer client from backend pub trait SequencerClientProvider { @@ -15,37 +17,34 @@ pub trait SequencerClientProvider { /// `XLayer`-specific RPC API trait. #[rpc(server, namespace = "eth")] pub trait XlayerRpcExtApi { - /// Returns boolean indicating if the node's flashblocks functionality is - /// enabled and working. + /// Returns boolean indicating if the node's flashblocks RPC functionality is enabled, + /// and if the flashblocks state cache is initialized. /// - /// Returns `true` when the flashblocks state cache has been initialized - /// (i.e. confirm height > 0), meaning the node is actively receiving and - /// caching flashblock data. + /// Returns `true` if the flashblocks state cache is not `None`, and when the flashblocks + /// state cache has been initialized (i.e. confirm height > 0), meaning the node is actively + /// receiving and caching flashblock data. #[method(name = "flashblocksEnabled")] async fn flashblocks_enabled(&self) -> RpcResult; } /// `XLayer` RPC extension implementation. -/// -/// Checks the [`FlashblockStateCache`] confirm height to determine if -/// flashblocks are active. A non-zero confirm height means the cache has been -/// initialized and is actively tracking flashblock state. #[derive(Debug, Clone)] pub struct XlayerRpcExt { - flash_cache: Option>, + flashblocks_state: Option>, } impl XlayerRpcExt { /// Creates a new [`XlayerRpcExt`]. - pub fn new(flash_cache: Option>) -> Self { - Self { flash_cache } + pub fn new(flashblocks_state: Option>) -> Self { + Self { flashblocks_state } } } #[async_trait] impl XlayerRpcExtApiServer for XlayerRpcExt { + /// Handler for: `eth_flashblocksEnabled` async fn flashblocks_enabled(&self) -> RpcResult { - Ok(self.flash_cache.as_ref().is_some_and(|cache| cache.get_confirm_height() > 0)) + Ok(self.flashblocks_state.as_ref().is_some_and(|cache| cache.get_confirm_height() > 0)) } } From 043e31252cb03d7ed36b15dc5066dcee6acc0c55 Mon Sep 17 00:00:00 2001 From: Niven Date: Thu, 12 Mar 2026 21:31:17 +0800 Subject: [PATCH 17/76] refactor(node): unify flashblocks state init into single if-let block MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Consolidate the previously split flashblocks service setup and state cache creation into one cohesive `if let Some` block. This fixes a subtle bug where using `.map()` with `?` operators would produce `Option>` instead of the intended `Option`, causing `?` to not propagate errors to the outer closure. 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude Sonnet 4.6 --- bin/node/src/main.rs | 25 ++++++++++++------------- 1 file changed, 12 insertions(+), 13 deletions(-) diff --git a/bin/node/src/main.rs b/bin/node/src/main.rs index a9aecb37..0a8e54f1 100644 --- a/bin/node/src/main.rs +++ b/bin/node/src/main.rs @@ -124,8 +124,11 @@ fn main() { .extend_rpc_modules(move |ctx| { let new_op_eth_api = Arc::new(ctx.registry.eth_api().clone()); - // Initialize flashblocks RPC service if not in flashblocks sequencer mode - if let Some(flashblock_url) = args.xlayer_args.flashblocks_rpc.flashblock_url { + // Initialize flashblocks RPC service and cache if flashblocks URL is configured. + let flashblocks_state = if let Some(flashblock_url) = + args.xlayer_args.flashblocks_rpc.flashblock_url + { + let flashblocks_state = FlashblockStateCache::new(); let stream = WsFlashBlockStream::new(flashblock_url); let service = FlashblocksRpcService::new( ctx.node().task_executor().clone(), @@ -137,8 +140,7 @@ fn main() { service.spawn(); info!(target: "reth::cli", "xlayer flashblocks service initialized"); - if xlayer_args.enable_flashblocks_subscription - { + if args.xlayer_args.flashblocks_rpc.enable_flashblocks_subscription { let flashblocks_pubsub = FlashblocksPubSub::new( ctx.registry.eth_handlers().pubsub.clone(), service.subscribe_pending_sequence(), @@ -152,24 +154,21 @@ fn main() { )?; info!(target: "reth::cli", "xlayer eth pubsub initialized"); } - } - - // Create flashblocks state cache if flashblocks URL is configured. - // Shared between the Eth API override and the ext RPC. - let flashblocks_state = args.rollup_args.flashblocks_url.map(|_| FlashblockStateCache::new()); - // Register flashblocks Eth API override (replaces subset of eth_ methods) - if let Some(fb_cache) = flashblocks_state.as_ref() { let flashblocks_eth = XLayerEthApiExt::new( ctx.registry.eth_api().clone(), - fb_cache.clone(), + flashblocks_state.clone(), ); ctx.modules.add_or_replace_if_module_configured( RethRpcModule::Eth, EthApiOverrideServer::into_rpc(flashblocks_eth), )?; info!(target: "reth::cli", "xlayer flashblocks eth api override enabled"); - } + + Some(flashblocks_state) + } else { + None + }; // Register X Layer RPC (eth_flashblocksEnabled) — always active let xlayer_rpc = XlayerRpcExt::new(flashblocks_state); From 5ab3b80d621f2866a6db82dea7132136f855149c Mon Sep 17 00:00:00 2001 From: Niven Date: Thu, 12 Mar 2026 23:17:56 +0800 Subject: [PATCH 18/76] refactor(flashblocks): store ExecutedBlock in confirm cache for state access MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Restructure ConfirmCache to store ExecutedBlock directly instead of just BlockAndReceipts, enabling state provider overlay for flashblock state queries. - Add ConfirmedBlock struct wrapping ExecutedBlock + receipts - Update insert/remove signatures to accept ExecutedBlock - Make PendingSequence.tx_index pub for state access - Fix execution cache test assertions 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude Opus 4.6 --- crates/flashblocks/src/cache/confirm.rs | 71 +++++++++++++++++------ crates/flashblocks/src/cache/mod.rs | 12 +++- crates/flashblocks/src/cache/pending.rs | 2 +- crates/flashblocks/src/execution/cache.rs | 14 +---- 4 files changed, 65 insertions(+), 34 deletions(-) diff --git a/crates/flashblocks/src/cache/confirm.rs b/crates/flashblocks/src/cache/confirm.rs index cdf2f897..147d74a1 100644 --- a/crates/flashblocks/src/cache/confirm.rs +++ b/crates/flashblocks/src/cache/confirm.rs @@ -1,13 +1,38 @@ use crate::CachedTxInfo; -use std::collections::{BTreeMap, HashMap}; +use eyre::eyre; +use std::{ + collections::{BTreeMap, HashMap}, + sync::Arc, +}; use alloy_consensus::transaction::TxHashRef; use alloy_primitives::{TxHash, B256}; -use eyre::eyre; -use reth_primitives_traits::{BlockBody, NodePrimitives}; + +use reth_chain_state::ExecutedBlock; +use reth_primitives_traits::{BlockBody, NodePrimitives, ReceiptTy}; use reth_rpc_eth_types::block::BlockAndReceipts; -const DEFAULT_CONFIRM_CACHE_SIZE: usize = 1_000; +const DEFAULT_CONFIRM_BLOCK_CACHE_SIZE: usize = 1_000; +const DEFAULT_TX_CACHE_SIZE: usize = DEFAULT_CONFIRM_BLOCK_CACHE_SIZE * 10_000; + +#[derive(Debug)] +pub struct ConfirmedBlock { + /// The locally built pending block with execution output. + pub executed_block: ExecutedBlock, + /// The receipts for the pending block + pub receipts: Arc>>, +} + +impl ConfirmedBlock { + /// Returns a pair of [`RecoveredBlock`] and a vector of [`NodePrimitives::Receipt`]s by + /// cloning from borrowed self. + pub fn to_block_and_receipts(&self) -> BlockAndReceipts { + BlockAndReceipts { + block: self.executed_block.recovered_block.clone(), + receipts: self.receipts.clone(), + } + } +} /// Confirmed flashblocks sequence cache that is ahead of the current node's canonical /// chainstate. We optimistically commit confirmed flashblocks sequences to the cache @@ -23,7 +48,7 @@ const DEFAULT_CONFIRM_CACHE_SIZE: usize = 1_000; pub struct ConfirmCache { /// Primary storage: block number → (block hash, block + receipts). /// `BTreeMap` ordering enables efficient range-based flush via `split_off`. - blocks: BTreeMap)>, + blocks: BTreeMap)>, /// Reverse index: block hash → block number for O(1) hash-based lookups. hash_to_number: HashMap, /// Transaction index: tx hash → cached tx info for O(1) tx/receipt lookups. @@ -39,7 +64,11 @@ impl Default for ConfirmCache { impl ConfirmCache { /// Creates a new [`ConfirmCache`]. pub fn new() -> Self { - Self { blocks: BTreeMap::new(), hash_to_number: HashMap::new(), tx_index: HashMap::new() } + Self { + blocks: BTreeMap::new(), + hash_to_number: HashMap::with_capacity(DEFAULT_CONFIRM_BLOCK_CACHE_SIZE), + tx_index: HashMap::with_capacity(DEFAULT_TX_CACHE_SIZE), + } } /// Returns the number of cached entries. @@ -60,18 +89,22 @@ impl ConfirmCache { /// before inserting if a reorg is detected. /// /// Returns an error if the cache is at max capacity. - pub fn insert(&mut self, height: u64, block: BlockAndReceipts) -> eyre::Result<()> { - if self.blocks.len() >= DEFAULT_CONFIRM_CACHE_SIZE { + pub fn insert( + &mut self, + height: u64, + executed_block: ExecutedBlock, + receipts: Arc>>, + ) -> eyre::Result<()> { + if self.blocks.len() >= DEFAULT_CONFIRM_BLOCK_CACHE_SIZE { return Err(eyre!( - "confirm cache at max capacity ({DEFAULT_CONFIRM_CACHE_SIZE}), cannot insert block: {height}" + "confirm cache at max capacity ({DEFAULT_CONFIRM_BLOCK_CACHE_SIZE}), cannot insert block: {height}" )); } // Build tx index entries for all transactions in this block - let hash = block.block.hash(); - let txs = block.block.body().transactions(); - let receipts = block.receipts.as_ref(); - for (idx, (tx, receipt)) in txs.iter().zip(receipts.iter()).enumerate() { + let hash = executed_block.recovered_block.hash(); + let txs = executed_block.recovered_block.body().transactions(); + for (idx, (tx, receipt)) in txs.iter().zip(receipts.as_ref().iter()).enumerate() { let tx_hash = *tx.tx_hash(); self.tx_index.insert( tx_hash, @@ -87,7 +120,7 @@ impl ConfirmCache { // Build block index entries for block data self.hash_to_number.insert(hash, height); - self.blocks.insert(height, (hash, block)); + self.blocks.insert(height, (hash, ConfirmedBlock { executed_block, receipts })); Ok(()) } @@ -115,7 +148,7 @@ impl ConfirmCache { /// Returns the confirmed block for the given block number, if present. pub fn get_block_by_number(&self, block_number: u64) -> Option> { - self.blocks.get(&block_number).map(|(_, block)| block.clone()) + self.blocks.get(&block_number).map(|(_, entry)| entry.to_block_and_receipts()) } /// Returns the cached transaction info for the given tx hash, if present. @@ -137,7 +170,7 @@ impl ConfirmCache { } /// Removes and returns the confirmed block for the given block number. - pub fn remove_block_by_number(&mut self, block_number: u64) -> Option> { + pub fn remove_block_by_number(&mut self, block_number: u64) -> Option> { let (hash, block) = self.blocks.remove(&block_number)?; self.hash_to_number.remove(&hash); self.remove_tx_index_for_block(&block); @@ -145,7 +178,7 @@ impl ConfirmCache { } /// Removes and returns the confirmed block for the given block hash. - pub fn remove_block_by_hash(&mut self, block_hash: &B256) -> Option> { + pub fn remove_block_by_hash(&mut self, block_hash: &B256) -> Option> { let number = self.hash_to_number.remove(block_hash)?; let (_, block) = self.blocks.remove(&number)?; self.remove_tx_index_for_block(&block); @@ -153,8 +186,8 @@ impl ConfirmCache { } /// Removes all tx index entries for the transactions in the given block. - fn remove_tx_index_for_block(&mut self, bar: &BlockAndReceipts) { - for tx in bar.block.body().transactions() { + fn remove_tx_index_for_block(&mut self, block: &ConfirmedBlock) { + for tx in block.executed_block.recovered_block.body().transactions() { self.tx_index.remove(&*tx.tx_hash()); } } diff --git a/crates/flashblocks/src/cache/mod.rs b/crates/flashblocks/src/cache/mod.rs index 43c29735..492b5ba0 100644 --- a/crates/flashblocks/src/cache/mod.rs +++ b/crates/flashblocks/src/cache/mod.rs @@ -13,6 +13,7 @@ use tracing::*; use alloy_primitives::{TxHash, B256}; use alloy_rpc_types_eth::{BlockId, BlockNumberOrTag}; +use reth_chain_state::ExecutedBlock; use reth_primitives_traits::{NodePrimitives, ReceiptTy}; use reth_rpc_eth_types::block::BlockAndReceipts; @@ -169,7 +170,8 @@ impl FlashblockStateCacheInner { fn handle_confirmed_block( &mut self, block_number: u64, - block: BlockAndReceipts, + executed_block: ExecutedBlock, + receipts: Arc>>, ) -> eyre::Result<()> { if block_number <= self.confirm_height { return Err(eyre::eyre!( @@ -183,7 +185,7 @@ impl FlashblockStateCacheInner { } self.confirm_height = block_number; - self.confirm_cache.insert(block_number, block)?; + self.confirm_cache.insert(block_number, executed_block, receipts)?; Ok(()) } @@ -202,7 +204,11 @@ impl FlashblockStateCacheInner { "polluted state cache - trying to advance pending tip but no current pending" ) })?; - self.handle_confirmed_block(expected_height, sequence.get_block_and_receipts())?; + self.handle_confirmed_block( + expected_height, + sequence.pending.executed_block, + sequence.pending.receipts, + )?; self.pending_cache = Some(pending_sequence); } else if pending_height == expected_height { // Replace the existing pending sequence diff --git a/crates/flashblocks/src/cache/pending.rs b/crates/flashblocks/src/cache/pending.rs index 249251bc..946cabf0 100644 --- a/crates/flashblocks/src/cache/pending.rs +++ b/crates/flashblocks/src/cache/pending.rs @@ -16,7 +16,7 @@ pub struct PendingSequence { #[deref] pub pending: PendingBlock, /// Transaction index: tx hash → cached tx info for O(1) tx/receipt lookups. - tx_index: HashMap>, + pub tx_index: HashMap>, /// Cached reads from execution for reuse. pub cached_reads: CachedReads, /// The current block hash of the latest flashblocks sequence. diff --git a/crates/flashblocks/src/execution/cache.rs b/crates/flashblocks/src/execution/cache.rs index 2fbdd5d6..4c2efa92 100644 --- a/crates/flashblocks/src/execution/cache.rs +++ b/crates/flashblocks/src/execution/cache.rs @@ -495,7 +495,7 @@ mod tests { let fb1_txs = vec![tx_a, tx_b]; let result = cache.get_resumable_state(100, &fb1_txs); assert!(result.is_some()); - assert_eq!(result.unwrap().2, 1); // 1 tx covered by cache + assert_eq!(result.unwrap().4, 1); // 1 tx covered by cache cache.update(100, fb1_txs, BundleState::default(), vec![]); assert_eq!(cache.len(), 2); @@ -665,19 +665,11 @@ mod tests { ); // Matching block + parent should hit. - let hit = cache.get_resumable_state_with_execution_meta_for_parent( - 100, - parent_a, - &[tx_a, tx_b, tx_c], - ); + let hit = cache.get_resumable_state_for_parent(100, parent_a, &[tx_a, tx_b, tx_c]); assert!(hit.is_some()); // Same block but different parent should miss. - let miss = cache.get_resumable_state_with_execution_meta_for_parent( - 100, - parent_b, - &[tx_a, tx_b, tx_c], - ); + let miss = cache.get_resumable_state_for_parent(100, parent_b, &[tx_a, tx_b, tx_c]); assert!(miss.is_none()); } } From b62076a5f671c255cbeb963390d4f5fe16da9714 Mon Sep 17 00:00:00 2001 From: Niven Date: Fri, 13 Mar 2026 12:55:32 +0800 Subject: [PATCH 19/76] feat(flashblocks-rpc): add memory overlay state provider for flashblocks state MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude Opus 4.6 --- Cargo.lock | 3 + crates/flashblocks/src/cache/confirm.rs | 63 +++++++---- crates/flashblocks/src/cache/mod.rs | 142 ++++++++++++++++++++---- crates/rpc/Cargo.toml | 3 + crates/rpc/src/eth.rs | 67 ++++++++--- 5 files changed, 222 insertions(+), 56 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 4b76140b..d1afec7e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -14417,6 +14417,7 @@ dependencies = [ "async-trait", "jsonrpsee", "op-alloy-network", + "reth-chain-state", "reth-optimism-primitives", "reth-optimism-rpc", "reth-primitives-traits", @@ -14424,6 +14425,8 @@ dependencies = [ "reth-rpc-convert", "reth-rpc-eth-api", "reth-rpc-eth-types", + "reth-rpc-server-types", + "reth-storage-api", "tokio", "tracing", "xlayer-flashblocks", diff --git a/crates/flashblocks/src/cache/confirm.rs b/crates/flashblocks/src/cache/confirm.rs index 147d74a1..fbdba3c9 100644 --- a/crates/flashblocks/src/cache/confirm.rs +++ b/crates/flashblocks/src/cache/confirm.rs @@ -81,14 +81,7 @@ impl ConfirmCache { self.blocks.is_empty() } - /// Inserts a confirmed block into the cache, indexed by both block number - /// and block hash. - /// - /// This is a raw insert with no reorg detection — callers are responsible - /// for flushing invalidated entries via [`flush_from`](Self::flush_from) - /// before inserting if a reorg is detected. - /// - /// Returns an error if the cache is at max capacity. + /// Inserts a confirmed block into the cache, indexed by block number and block hash. pub fn insert( &mut self, height: u64, @@ -151,7 +144,6 @@ impl ConfirmCache { self.blocks.get(&block_number).map(|(_, entry)| entry.to_block_and_receipts()) } - /// Returns the cached transaction info for the given tx hash, if present. /// Returns the cached transaction info for the given tx hash, if present. pub fn get_tx_info(&self, tx_hash: &TxHash) -> Option<(CachedTxInfo, BlockAndReceipts)> { let tx_info = self.tx_index.get(tx_hash).cloned()?; @@ -159,14 +151,42 @@ impl ConfirmCache { Some((tx_info, block)) } - /// Returns `true` if the cache contains a block with the given hash. - pub fn contains_hash(&self, block_hash: &B256) -> bool { - self.hash_to_number.contains_key(block_hash) - } - - /// Returns `true` if the cache contains a block with the given number. - pub fn contains_number(&self, block_number: u64) -> bool { - self.blocks.contains_key(&block_number) + /// Returns all `ExecutedBlock`s in the cache up to and including `target_height`, + /// ordered newest to oldest (for use with `MemoryOverlayStateProvider`). + /// + /// Returns an error if state cache pollution detected (non-contiguous blocks). + pub fn get_executed_blocks_up_to_height( + &self, + target_height: u64, + canon_height: u64, + ) -> eyre::Result>> { + // Validation checks + let entries: Vec<_> = self.blocks.range(..=target_height).collect(); + if !entries.is_empty() { + // Verify lowest overlay block must be at most `canon_height + 1` to ensure + // no gap between canonical state and the overlay + let lowest = *entries[0].0; + if lowest > canon_height + 1 { + return Err(eyre!( + "gap between canonical height {canon_height} and lowest overlay block {lowest}" + )); + } + // Verify contiguity + for window in entries.windows(2) { + let (a, _) = window[0]; + let (b, _) = window[1]; + if *b != *a + 1 { + return Err(eyre!( + "non-contiguous confirm cache: gap between blocks {a} and {b}" + )); + } + } + } + Ok(entries + .into_iter() + .rev() + .map(|(_, (_, confirmed))| confirmed.executed_block.clone()) + .collect()) } /// Removes and returns the confirmed block for the given block number. @@ -194,12 +214,11 @@ impl ConfirmCache { /// Flushes all entries with block number <= `canonical_number`. /// - /// Called when the canonical chain catches up to the confirmed cache. - /// Returns the number of entries flushed. - pub fn flush_up_to(&mut self, canonical_number: u64) -> usize { - let retained = self.blocks.split_off(&(canonical_number + 1)); + /// Called when the canonical chain catches up to the confirmed cache. Returns + /// the number of entries flushed. + pub fn flush_up_to_height(&mut self, canon_height: u64) -> usize { + let retained = self.blocks.split_off(&(canon_height + 1)); let stale = std::mem::replace(&mut self.blocks, retained); - let count = stale.len(); for (hash, bar) in stale.into_values() { self.hash_to_number.remove(&hash); diff --git a/crates/flashblocks/src/cache/mod.rs b/crates/flashblocks/src/cache/mod.rs index 492b5ba0..23d23088 100644 --- a/crates/flashblocks/src/cache/mod.rs +++ b/crates/flashblocks/src/cache/mod.rs @@ -11,11 +11,13 @@ use parking_lot::RwLock; use std::sync::Arc; use tracing::*; +use alloy_consensus::BlockHeader; use alloy_primitives::{TxHash, B256}; use alloy_rpc_types_eth::{BlockId, BlockNumberOrTag}; -use reth_chain_state::ExecutedBlock; +use reth_chain_state::{ExecutedBlock, MemoryOverlayStateProvider}; use reth_primitives_traits::{NodePrimitives, ReceiptTy}; use reth_rpc_eth_types::block::BlockAndReceipts; +use reth_storage_api::StateProviderBox; /// Cached transaction info (block context, receipt and tx data) for O(1) lookups /// by transaction hash. @@ -105,6 +107,78 @@ impl FlashblockStateCache { pub fn get_tx_info(&self, tx_hash: &TxHash) -> Option<(CachedTxInfo, BlockAndReceipts)> { self.inner.read().get_tx_info(tx_hash) } + + /// Creates a `StateProviderBox` that overlays the flashblock execution state on top of the + /// canonical state for the given block ID. Instantiates a `MemoryOverlayStateProvider` by + /// getting the ordered `ExecutedBlock`s from the cache, and overlaying them on top of the + /// canonical state provider. + /// + /// For a specific block number/hash, returns all confirm cache blocks up to that height. + /// For `Pending`, it also includes the current pending executed block state. + /// For `Latest`, resolves to the confirm height. + /// Returns `None` if the target block is not in the flashblocks cache. + /// + /// # Safety of the overlay + /// The returned blocks are meant to be layered on top of a canonical `StateProviderBox` + /// via `MemoryOverlayStateProvider`. This is correct **if and only if** the overlay + /// blocks form a contiguous chain from some height down to `canonical_height + 1` + /// (or `canonical_height` itself in the redundant-but-safe race case). + /// + /// **Safe (redundant overlap)**: Due to a race between canonical commit and confirm + /// cache flush, the lowest overlay block may equal the canonical height. For example, + /// canonical is at height `x` and the overlay contains `[x+2, x+1, x]`. This is safe + /// because `MemoryOverlayStateProvider` checks overlay blocks first (newest-to-oldest) + /// — the duplicate `BundleState` at height `x` contains changes identical to what + /// canonical already applied, so the result is correct regardless of which source + /// resolves the query. + /// + /// **State inconsistency (gap in overlay)**: If an intermediate block is missing (e.g. + /// overlay has `[x+2, x]` but not `x+1`), any account modified only at height `x+1` + /// would be invisible — the query falls through to canonical, returning stale state. + /// + /// **State inconsistency (canonical too far behind)**: If the canonical height is more + /// than one block below the lowest overlay block (e.g. canonical at `x-2`, lowest overlay + /// at `x`), changes at height `x-1` are not covered by either source. + /// + /// Both failure modes reduce to: every height between `canonical_height + 1` and the + /// target must be present in the overlay. This invariant is naturally maintained by + /// `handle_confirmed_block` (rejects non-consecutive heights) and the pending block always + /// being `confirm_height + 1`. + /// + /// On validation failure (non-contiguous overlay or gap to canonical), the cache is + /// flushed and `None` is returned. + pub fn get_state_provider_by_id( + &self, + block_id: Option, + canonical_state: StateProviderBox, + ) -> Option { + let in_memory = { + let guard = self.inner.read(); + let block_num = match block_id.unwrap_or(BlockId::Number(BlockNumberOrTag::Latest)) { + BlockId::Number(id) => match id { + BlockNumberOrTag::Pending => guard.get_pending_block(), + BlockNumberOrTag::Latest => guard.get_confirmed_block(), + BlockNumberOrTag::Number(num) => guard.get_block_by_number(num), + _ => None, + }, + BlockId::Hash(hash) => guard.get_block_by_hash(&hash.block_hash), + }? + .block + .number(); + + match guard.get_executed_blocks_up_to_height(block_num) { + Ok(Some(blocks)) => blocks, + Ok(None) => return None, + Err(e) => { + warn!(target: "flashblocks", "Failed to get flashblocks state provider: {e}. Flushing cache"); + drop(guard); + self.inner.write().flush(); + return None; + } + } + }; + Some(Box::new(MemoryOverlayStateProvider::new(canonical_state, in_memory))) + } } // FlashblockStateCache state mutation interfaces. @@ -153,11 +227,25 @@ struct FlashblockStateCacheInner { /// Highest confirmed block height in the confirm cache. If flashblocks state cache /// is uninitialized, the confirm height is set to 0. confirm_height: u64, + /// Highest confirmed block height in the canonical chainstate. + canon_height: u64, } impl FlashblockStateCacheInner { fn new() -> Self { - Self { pending_cache: None, confirm_cache: ConfirmCache::new(), confirm_height: 0 } + Self { + pending_cache: None, + confirm_cache: ConfirmCache::new(), + confirm_height: 0, + canon_height: 0, + } + } + + fn flush(&mut self) { + warn!(target: "flashblocks", "Flushing flashblocks state cache"); + self.pending_cache = None; + self.confirm_height = 0; + self.confirm_cache.clear(); } /// Handles flushing a newly confirmed block to the confirm cache. Note that @@ -173,17 +261,11 @@ impl FlashblockStateCacheInner { executed_block: ExecutedBlock, receipts: Arc>>, ) -> eyre::Result<()> { - if block_number <= self.confirm_height { - return Err(eyre::eyre!( - "polluted state cache - trying to commit lower confirm height block" - )); - } if block_number != self.confirm_height + 1 { return Err(eyre::eyre!( - "polluted state cache - not next consecutive confirm height block" + "polluted state cache - not next consecutive target confirm height block" )); } - self.confirm_height = block_number; self.confirm_cache.insert(block_number, executed_block, receipts)?; Ok(()) @@ -224,14 +306,14 @@ impl FlashblockStateCacheInner { fn handle_canonical_block(&mut self, canon_height: u64, reorg: bool) { let pending_stale = self.pending_cache.as_ref().is_some_and(|p| p.get_height() <= canon_height); - if pending_stale || reorg { warn!( target: "flashblocks", canonical_height = canon_height, cache_height = self.confirm_height, - reorg, - "Flushing flashblocks state cache", + canonical_reorg = reorg, + pending_stale = pending_stale, + "Reorg or pending stale detected on handle canonical block", ); self.flush(); } else { @@ -241,17 +323,13 @@ impl FlashblockStateCacheInner { cache_height = self.confirm_height, "Flashblocks state cache received canonical block, flushing confirm cache up to canonical height" ); - self.confirm_cache.flush_up_to(canon_height); + self.confirm_cache.flush_up_to_height(canon_height); } + // Update state heights + self.canon_height = canon_height; self.confirm_height = self.confirm_height.max(canon_height); } - fn flush(&mut self) { - self.confirm_height = 0; - self.pending_cache = None; - self.confirm_cache.clear(); - } - pub fn get_confirmed_block(&self) -> Option> { self.get_block_by_number(self.confirm_height) } @@ -284,4 +362,30 @@ impl FlashblockStateCacheInner { .and_then(|p| p.get_tx_info(tx_hash)) .or_else(|| self.confirm_cache.get_tx_info(tx_hash)) } + + /// Returns all `ExecutedBlock`s up to `target_height`. + fn get_executed_blocks_up_to_height( + &self, + target_height: u64, + ) -> eyre::Result>>> { + if self.confirm_height == 0 + || self.canon_height == 0 + || target_height > self.confirm_height + 1 + || target_height <= self.canon_height + { + // Cache not initialized or target height is outside the cache range + return Ok(None); + } + let mut blocks = Vec::new(); + if let Some(p) = self.pending_cache.as_ref() + && p.get_height() == target_height + { + blocks.push(p.pending.executed_block.clone()); + } + blocks.extend( + self.confirm_cache + .get_executed_blocks_up_to_height(target_height, self.canon_height)?, + ); + Ok(Some(blocks)) + } } diff --git a/crates/rpc/Cargo.toml b/crates/rpc/Cargo.toml index 135ccd20..67c656ed 100644 --- a/crates/rpc/Cargo.toml +++ b/crates/rpc/Cargo.toml @@ -14,6 +14,7 @@ default = [] xlayer-flashblocks.workspace = true # reth +reth-chain-state.workspace = true reth-optimism-rpc.workspace = true reth-optimism-primitives.workspace = true reth-primitives-traits.workspace = true @@ -21,6 +22,8 @@ reth-rpc.workspace = true reth-rpc-convert.workspace = true reth-rpc-eth-api.workspace = true reth-rpc-eth-types.workspace = true +reth-rpc-server-types.workspace = true +reth-storage-api.workspace = true # alloy alloy-consensus.workspace = true diff --git a/crates/rpc/src/eth.rs b/crates/rpc/src/eth.rs index 13d84c81..160fe913 100644 --- a/crates/rpc/src/eth.rs +++ b/crates/rpc/src/eth.rs @@ -24,8 +24,10 @@ use reth_rpc::eth::EthFilter; use reth_rpc_convert::RpcTransaction; use reth_rpc_eth_api::{ helpers::{EthBlocks, EthCall, EthState, EthTransactions, FullEthApi}, - EthApiServer, EthApiTypes, RpcBlock, RpcReceipt, + EthApiServer, EthApiTypes, RpcBlock, RpcNodeCore, RpcReceipt, }; +use reth_rpc_server_types::result::ToRpcResult; +use reth_storage_api::{StateProvider, StateProviderBox, StateProviderFactory}; use xlayer_flashblocks::FlashblockStateCache; @@ -320,7 +322,7 @@ where block_hash: B256, index: Index, ) -> RpcResult>> { - trace!(target: "rpc::eth", ?hash, ?index, "Serving eth_getRawTransactionByBlockHashAndIndex"); + trace!(target: "rpc::eth", ?block_hash, ?index, "Serving eth_getTransactionByBlockHashAndIndex"); if let Some(bar) = self.flashblocks_state.get_block_by_hash(&block_hash) { return to_rpc_transaction_from_bar_and_index::( &bar, @@ -347,7 +349,7 @@ where ) .map_err(Into::into); } - self.eth_api.transaction_by_block_number_and_index(block_number, index).await + self.eth_api.transaction_by_block_number_and_index(number, index).await } /// Handler for: `eth_getRawTransactionByBlockHashAndIndex` @@ -377,7 +379,7 @@ where { return Ok(Some(tx.encoded_2718().into())); } - self.eth_api.raw_transaction_by_block_number_and_index(hash, index).await + self.eth_api.raw_transaction_by_block_number_and_index(number, index).await } /// Handler for: `eth_sendRawTransactionSync` @@ -396,28 +398,28 @@ where state_overrides: Option, block_overrides: Option>, ) -> RpcResult { - trace!(target: "rpc::eth", ?request, ?block_number, ?state_overrides, ?block_overrides, "Serving eth_call"); - // TODO: Implement state provider + trace!(target: "rpc::eth", ?transaction, ?block_number, ?state_overrides, ?block_overrides, "Serving eth_call"); + // Phase 1: delegate to eth_api. Phase 2 enhancement: merge flashblock state overrides. self.eth_api.call(transaction, block_number, state_overrides, block_overrides).await } /// Handler for: `eth_estimateGas` async fn estimate_gas( &self, - transaction: alloy_rpc_types_eth::OpTransactionRequest, + transaction: OpTransactionRequest, block_number: Option, overrides: Option, ) -> RpcResult { - trace!(target: "rpc::eth", ?request, ?block_number, "Serving eth_estimateGas"); - // TODO: Implement state provider + trace!(target: "rpc::eth", ?transaction, ?block_number, "Serving eth_estimateGas"); + // Phase 1: delegate to eth_api. Phase 2 enhancement: merge flashblock state overrides. self.eth_api.estimate_gas(transaction, block_number, overrides).await } /// Handler for: `eth_getBalance` async fn balance(&self, address: Address, block_number: Option) -> RpcResult { trace!(target: "rpc::eth", ?address, ?block_number, "Serving eth_getBalance"); - if let Some(_bar) = self.flashblocks_state.get_rpc_block_by_id(block_number) { - // TODO: Implement state provider + if let Some(state) = self.get_flashblock_state_provider_by_id(block_number)? { + return Ok(state.account_balance(&address).to_rpc_result()?.unwrap_or_default()); } self.eth_api.balance(address, block_number).await } @@ -428,13 +430,25 @@ where address: Address, block_number: Option, ) -> RpcResult { - if let Some(_bar) = self.flashblocks_state.get_rpc_block_by_id(block_number) { - // TODO: Implement state provider + trace!(target: "rpc::eth", ?address, ?block_number, "Serving eth_getTransactionCount"); + if let Some(state) = self.get_flashblock_state_provider_by_id(block_number)? { + return Ok(U256::from( + state.account_nonce(&address).to_rpc_result()?.unwrap_or_default(), + )); } self.eth_api.transaction_count(address, block_number).await } + /// Handler for: `eth_getCode` async fn get_code(&self, address: Address, block_number: Option) -> RpcResult { + trace!(target: "rpc::eth", ?address, ?block_number, "Serving eth_getCode"); + if let Some(state) = self.get_flashblock_state_provider_by_id(block_number)? { + return Ok(state + .account_code(&address) + .to_rpc_result()? + .map(|code| code.original_bytes()) + .unwrap_or_default()); + } self.eth_api.get_code(address, block_number).await } @@ -445,9 +459,32 @@ where slot: U256, block_number: Option, ) -> RpcResult { - if let Some(_bar) = self.flashblocks_state.get_rpc_block_by_id(block_number) { - // TODO: Implement state provider + trace!(target: "rpc::eth", ?address, ?slot, ?block_number, "Serving eth_getStorageAt"); + if let Some(state) = self.get_flashblock_state_provider_by_id(block_number)? { + let storage_key = B256::new(slot.to_be_bytes()); + return Ok(B256::new( + state + .storage(address, storage_key) + .to_rpc_result()? + .unwrap_or_default() + .to_be_bytes(), + )); } self.eth_api.storage_at(address, slot, block_number).await } } + +impl XLayerEthApiExt +where + Eth: FullEthApi + Send + Sync + 'static, +{ + /// Returns a `StateProvider` overlaying flashblock execution state on top of canonical state + /// for the given block ID. Returns `None` if the block is not in the flashblocks cache. + fn get_flashblock_state_provider_by_id( + &self, + block_id: Option, + ) -> RpcResult> { + let canon_state = self.eth_api.provider().latest().to_rpc_result()?; + Ok(self.flashblocks_state.get_state_provider_by_id(block_id, canon_state)) + } +} From b122e33f2bd7afd53deddceff3459ccdd187d56a Mon Sep 17 00:00:00 2001 From: Niven Date: Fri, 13 Mar 2026 14:58:11 +0800 Subject: [PATCH 20/76] feat(flashblocks-rpc): support eth_sendRawTransactionSync MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude Opus 4.6 --- Cargo.lock | 3 ++ bin/node/src/main.rs | 16 +++--- crates/flashblocks/src/cache/mod.rs | 28 ++++++++++- crates/rpc/Cargo.toml | 4 ++ crates/rpc/src/eth.rs | 78 ++++++++++++++++++++++++++--- crates/rpc/src/helper.rs | 30 +---------- 6 files changed, 113 insertions(+), 46 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index d1afec7e..b37ba44a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -14415,7 +14415,9 @@ dependencies = [ "alloy-primitives", "alloy-rpc-types-eth", "async-trait", + "futures", "jsonrpsee", + "jsonrpsee-types", "op-alloy-network", "reth-chain-state", "reth-optimism-primitives", @@ -14428,6 +14430,7 @@ dependencies = [ "reth-rpc-server-types", "reth-storage-api", "tokio", + "tokio-stream", "tracing", "xlayer-flashblocks", ] diff --git a/bin/node/src/main.rs b/bin/node/src/main.rs index 0a8e54f1..4a24fb56 100644 --- a/bin/node/src/main.rs +++ b/bin/node/src/main.rs @@ -124,10 +124,10 @@ fn main() { .extend_rpc_modules(move |ctx| { let new_op_eth_api = Arc::new(ctx.registry.eth_api().clone()); - // Initialize flashblocks RPC service and cache if flashblocks URL is configured. let flashblocks_state = if let Some(flashblock_url) = args.xlayer_args.flashblocks_rpc.flashblock_url { + // Initialize flashblocks RPC let flashblocks_state = FlashblockStateCache::new(); let stream = WsFlashBlockStream::new(flashblock_url); let service = FlashblocksRpcService::new( @@ -140,10 +140,11 @@ fn main() { service.spawn(); info!(target: "reth::cli", "xlayer flashblocks service initialized"); + // Initialize custom flashblocks subscription if args.xlayer_args.flashblocks_rpc.enable_flashblocks_subscription { let flashblocks_pubsub = FlashblocksPubSub::new( ctx.registry.eth_handlers().pubsub.clone(), - service.subscribe_pending_sequence(), + flashblocks_state.subscribe_pending_sequence(), Box::new(ctx.node().task_executor().clone()), new_op_eth_api.converter().clone(), xlayer_args.flashblocks_subscription_max_addresses, @@ -152,9 +153,10 @@ fn main() { RethRpcModule::Eth, flashblocks_pubsub.into_rpc(), )?; - info!(target: "reth::cli", "xlayer eth pubsub initialized"); + info!(target: "reth::cli", "xlayer flashblocks pubsub initialized"); } + // Register flashblocks Eth API overrides let flashblocks_eth = XLayerEthApiExt::new( ctx.registry.eth_api().clone(), flashblocks_state.clone(), @@ -163,20 +165,18 @@ fn main() { RethRpcModule::Eth, EthApiOverrideServer::into_rpc(flashblocks_eth), )?; - info!(target: "reth::cli", "xlayer flashblocks eth api override enabled"); - + info!(target: "reth::cli", "xlayer flashblocks eth api overrides initialized"); Some(flashblocks_state) } else { None }; - // Register X Layer RPC (eth_flashblocksEnabled) — always active + // Register X Layer RPC let xlayer_rpc = XlayerRpcExt::new(flashblocks_state); ctx.modules.merge_configured(XlayerRpcExtApiServer::into_rpc( xlayer_rpc, ))?; - info!(target: "reth::cli", "xlayer rpc extension enabled"); - + info!(target: "reth::cli", "xlayer eth rpc extension enabled"); info!(message = "X Layer RPC modules initialized"); Ok(()) }) diff --git a/crates/flashblocks/src/cache/mod.rs b/crates/flashblocks/src/cache/mod.rs index 23d23088..18d6b40f 100644 --- a/crates/flashblocks/src/cache/mod.rs +++ b/crates/flashblocks/src/cache/mod.rs @@ -7,8 +7,10 @@ pub use confirm::ConfirmCache; pub use pending::PendingSequence; pub use raw::RawFlashblocksCache; +use crate::PendingSequenceRx; use parking_lot::RwLock; use std::sync::Arc; +use tokio::sync::watch; use tracing::*; use alloy_consensus::BlockHeader; @@ -108,6 +110,12 @@ impl FlashblockStateCache { self.inner.read().get_tx_info(tx_hash) } + /// Returns a cloned watch receiver for pending sequence updates. + /// Used by `eth_sendRawTransactionSync` to watch for sub-block preconfirmation. + pub fn subscribe_pending_sequence(&self) -> PendingSequenceRx { + self.inner.read().subscribe_pending_sequence() + } + /// Creates a `StateProviderBox` that overlays the flashblock execution state on top of the /// canonical state for the given block ID. Instantiates a `MemoryOverlayStateProvider` by /// getting the ordered `ExecutedBlock`s from the cache, and overlaying them on top of the @@ -229,15 +237,25 @@ struct FlashblockStateCacheInner { confirm_height: u64, /// Highest confirmed block height in the canonical chainstate. canon_height: u64, + /// Receiver of the most recent executed [`PendingSequence`] built from the latest + /// flashblocks sequence. + pending_sequence_rx: PendingSequenceRx, + /// Sender of the most recent executed [`PendingSequence`] built from the latest + /// flashblocks sequence. + pending_sequence_tx: watch::Sender>>, } impl FlashblockStateCacheInner { fn new() -> Self { + let (tx, rx) = watch::channel(None); + Self { pending_cache: None, confirm_cache: ConfirmCache::new(), confirm_height: 0, canon_height: 0, + pending_sequence_rx: rx, + pending_sequence_tx: tx, } } @@ -291,10 +309,12 @@ impl FlashblockStateCacheInner { sequence.pending.executed_block, sequence.pending.receipts, )?; - self.pending_cache = Some(pending_sequence); + self.pending_cache = Some(pending_sequence.clone()); + self.pending_sequence_tx.send(Some(pending_sequence)); } else if pending_height == expected_height { // Replace the existing pending sequence - self.pending_cache = Some(pending_sequence); + self.pending_cache = Some(pending_sequence.clone()); + self.pending_sequence_tx.send(Some(pending_sequence)); } else { return Err(eyre::eyre!( "polluted state cache - not next consecutive pending height block" @@ -388,4 +408,8 @@ impl FlashblockStateCacheInner { ); Ok(Some(blocks)) } + + pub fn subscribe_pending_sequence(&self) -> PendingSequenceRx { + self.pending_sequence_rx.clone() + } } diff --git a/crates/rpc/Cargo.toml b/crates/rpc/Cargo.toml index 67c656ed..9276384b 100644 --- a/crates/rpc/Cargo.toml +++ b/crates/rpc/Cargo.toml @@ -37,8 +37,12 @@ op-alloy-network.workspace = true # rpc async-trait.workspace = true jsonrpsee.workspace = true +jsonrpsee-types.workspace = true # misc +futures.workspace = true +tokio.workspace = true +tokio-stream.workspace = true tracing.workspace = true [dev-dependencies] diff --git a/crates/rpc/src/eth.rs b/crates/rpc/src/eth.rs index 160fe913..8a6fb3c0 100644 --- a/crates/rpc/src/eth.rs +++ b/crates/rpc/src/eth.rs @@ -1,10 +1,12 @@ use crate::helper::{ - to_block_receipts, to_rpc_block, to_rpc_receipt, to_rpc_transaction_from_bar_and_index, + to_block_receipts, to_rpc_block, to_rpc_transaction, to_rpc_transaction_from_bar_and_index, }; +use futures::StreamExt; use jsonrpsee::{ core::{async_trait, RpcResult}, proc_macros::rpc, }; +use tokio_stream::wrappers::WatchStream; use tracing::*; use alloy_consensus::BlockHeader; @@ -17,6 +19,7 @@ use alloy_rpc_types_eth::{ use op_alloy_network::Optimism; use op_alloy_rpc_types::OpTransactionRequest; +use reth_chain_state::CanonStateSubscriptions; use reth_optimism_primitives::OpPrimitives; use reth_optimism_rpc::eth::OpEthApi; use reth_primitives_traits::{BlockBody, NodePrimitives, SignerRecoverable}; @@ -26,6 +29,7 @@ use reth_rpc_eth_api::{ helpers::{EthBlocks, EthCall, EthState, EthTransactions, FullEthApi}, EthApiServer, EthApiTypes, RpcBlock, RpcNodeCore, RpcReceipt, }; +use reth_rpc_eth_types::{block::convert_transaction_receipt, EthApiError}; use reth_rpc_server_types::result::ToRpcResult; use reth_storage_api::{StateProvider, StateProviderBox, StateProviderFactory}; @@ -135,7 +139,7 @@ pub trait FlashblocksEthApiOverride { /// /// This will return a timeout error if the transaction isn't included within some time period. #[method(name = "sendRawTransactionSync")] - async fn send_raw_transaction_sync(&self, bytes: Bytes) -> RpcResult; + async fn send_raw_transaction_sync(&self, bytes: Bytes) -> RpcResult>; // ----------------- State apis ----------------- /// Executes a new message call immediately without creating a transaction on the block chain, @@ -310,8 +314,11 @@ where /// Handler for: `eth_getTransactionReceipt` async fn transaction_receipt(&self, hash: TxHash) -> RpcResult>> { trace!(target: "rpc::eth", ?hash, "Serving eth_getTransactionReceipt"); - if let Some((info, bar)) = self.flashblocks_state.get_tx_info(&hash) { - return Ok(Some(to_rpc_receipt::(&info, &bar, self.eth_api.converter())?)); + if let Some((_, bar)) = self.flashblocks_state.get_tx_info(&hash) + && let Some(Ok(receipt)) = + bar.find_and_convert_transaction_receipt(hash, self.eth_api.converter()) + { + return Ok(Some(receipt)); } self.eth_api.transaction_receipt(hash).await } @@ -383,10 +390,67 @@ where } /// Handler for: `eth_sendRawTransactionSync` - async fn send_raw_transaction_sync(&self, tx: Bytes) -> RpcResult> { + async fn send_raw_transaction_sync(&self, tx: Bytes) -> RpcResult> { trace!(target: "rpc::eth", ?tx, "Serving eth_sendRawTransactionSync"); - // TODO: Implement - self.eth_api.send_raw_transaction_sync(tx).await + let timeout_duration = EthTransactions::send_raw_transaction_sync_timeout(&self.eth_api); + let hash = + EthTransactions::send_raw_transaction(&self.eth_api, tx).await.map_err(Into::into)?; + let converter = self.eth_api.converter(); + + let mut canonical_stream = self.eth_api.provider().canonical_state_stream(); + let mut flashblock_stream = + WatchStream::new(self.flashblocks_state.subscribe_pending_sequence()); + + tokio::time::timeout(timeout_duration, async { + loop { + tokio::select! { + biased; + // check if the tx was preconfirmed in the latest flashblocks pending sequence + pending = flashblock_stream.next() => { + if let Some(pending_sequence) = pending.flatten() { + let bar = pending_sequence.get_block_and_receipts(); + if let Some(receipt) = + bar.find_and_convert_transaction_receipt(hash, converter) + { + return receipt; + } + } + } + // Listen for regular canonical block updates for inclusion + canonical_notification = canonical_stream.next() => { + if let Some(notification) = canonical_notification { + let chain = notification.committed(); + if let Some((block, tx, receipt, all_receipts)) = + chain.find_transaction_and_receipt_by_hash(hash) + { + if let Some(receipt) = convert_transaction_receipt( + block, + all_receipts, + tx, + receipt, + converter, + ) + .transpose() + .map_err(Into::into)? + { + return Ok(receipt); + } + } + } else { + // Canonical stream ended + break; + } + } + } + } + Err(EthApiError::TransactionConfirmationTimeout { hash, duration: timeout_duration } + .into()) + }) + .await + .unwrap_or_else(|_elapsed| { + Err(EthApiError::TransactionConfirmationTimeout { hash, duration: timeout_duration } + .into()) + }) } // ----------------- State apis ----------------- diff --git a/crates/rpc/src/helper.rs b/crates/rpc/src/helper.rs index d5879ccf..e9d75269 100644 --- a/crates/rpc/src/helper.rs +++ b/crates/rpc/src/helper.rs @@ -101,35 +101,7 @@ where }) .collect::>(); - Ok(converter.convert_receipts(inputs)?) -} - -/// Converts a single `CachedTxInfo` into an RPC receipt, using the full block receipts -/// from `BlockAndReceipts` to correctly calculate gas used and log index offsets. -pub(crate) fn to_rpc_receipt>( - info: &CachedTxInfo, - bar: &BlockAndReceipts, - converter: &Eth::RpcConvert, -) -> Result, Eth::Error> -where - Eth::RpcConvert: RpcConvert, - Eth::Error: From<::Error>, -{ - let (prev_cumulative_gas, next_log_index) = - calculate_gas_used_and_next_log_index(info.tx_index, bar.receipts.as_ref()); - - let meta = build_tx_meta(bar, info.tx.tx_hash(), info.tx_index); - let recovered = info.tx.try_into_recovered_unchecked()?; - Ok(converter - .convert_receipts(vec![ConvertReceiptInput { - tx: recovered.as_recovered_ref(), - gas_used: info.receipt.cumulative_gas_used() - prev_cumulative_gas, - next_log_index, - meta, - receipt: info.receipt.clone(), - }])? - .pop() - .unwrap()) + Ok(converter.convert_receipts_with_block(inputs, bar.sealed_block())?) } /// Converts a `CachedTxInfo` and `BlockAndReceipts` into an RPC transaction. From dafa740d6a5ec8a110a29180e7cfe50e6aa79ce9 Mon Sep 17 00:00:00 2001 From: Niven Date: Fri, 13 Mar 2026 16:06:38 +0800 Subject: [PATCH 21/76] feat(flashblocks-rpc): support eth_call and eth_estimateGas MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude Opus 4.6 --- Cargo.lock | 1 + crates/flashblocks/src/cache/mod.rs | 55 +++++++++++++++-------------- crates/rpc/Cargo.toml | 1 + crates/rpc/src/eth.rs | 44 ++++++++++++++++++----- 4 files changed, 66 insertions(+), 35 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index b37ba44a..2acc63d6 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -14423,6 +14423,7 @@ dependencies = [ "reth-optimism-primitives", "reth-optimism-rpc", "reth-primitives-traits", + "reth-revm", "reth-rpc", "reth-rpc-convert", "reth-rpc-eth-api", diff --git a/crates/flashblocks/src/cache/mod.rs b/crates/flashblocks/src/cache/mod.rs index 18d6b40f..7907a782 100644 --- a/crates/flashblocks/src/cache/mod.rs +++ b/crates/flashblocks/src/cache/mod.rs @@ -17,7 +17,7 @@ use alloy_consensus::BlockHeader; use alloy_primitives::{TxHash, B256}; use alloy_rpc_types_eth::{BlockId, BlockNumberOrTag}; use reth_chain_state::{ExecutedBlock, MemoryOverlayStateProvider}; -use reth_primitives_traits::{NodePrimitives, ReceiptTy}; +use reth_primitives_traits::{NodePrimitives, ReceiptTy, SealedHeaderFor}; use reth_rpc_eth_types::block::BlockAndReceipts; use reth_storage_api::StateProviderBox; @@ -159,33 +159,36 @@ impl FlashblockStateCache { &self, block_id: Option, canonical_state: StateProviderBox, - ) -> Option { - let in_memory = { - let guard = self.inner.read(); - let block_num = match block_id.unwrap_or(BlockId::Number(BlockNumberOrTag::Latest)) { - BlockId::Number(id) => match id { - BlockNumberOrTag::Pending => guard.get_pending_block(), - BlockNumberOrTag::Latest => guard.get_confirmed_block(), - BlockNumberOrTag::Number(num) => guard.get_block_by_number(num), - _ => None, - }, - BlockId::Hash(hash) => guard.get_block_by_hash(&hash.block_hash), - }? - .block - .number(); - - match guard.get_executed_blocks_up_to_height(block_num) { - Ok(Some(blocks)) => blocks, - Ok(None) => return None, - Err(e) => { - warn!(target: "flashblocks", "Failed to get flashblocks state provider: {e}. Flushing cache"); - drop(guard); - self.inner.write().flush(); - return None; - } + ) -> Option<(StateProviderBox, SealedHeaderFor)> { + let guard = self.inner.read(); + let block = match block_id.unwrap_or(BlockId::Number(BlockNumberOrTag::Latest)) { + BlockId::Number(id) => match id { + BlockNumberOrTag::Pending => guard.get_pending_block(), + BlockNumberOrTag::Latest => guard.get_confirmed_block(), + BlockNumberOrTag::Number(num) => guard.get_block_by_number(num), + _ => None, + }, + BlockId::Hash(hash) => guard.get_block_by_hash(&hash.block_hash), + }? + .block; + let block_num = block.number(); + + let in_memory = guard.get_executed_blocks_up_to_height(block_num); + drop(guard); + + let in_memory = match in_memory { + Ok(Some(blocks)) => blocks, + Ok(None) => return None, + Err(e) => { + warn!(target: "flashblocks", "Failed to get flashblocks state provider: {e}. Flushing cache"); + self.inner.write().flush(); + return None; } }; - Some(Box::new(MemoryOverlayStateProvider::new(canonical_state, in_memory))) + Some(( + Box::new(MemoryOverlayStateProvider::new(canonical_state, in_memory)), + block.clone_sealed_header(), + )) } } diff --git a/crates/rpc/Cargo.toml b/crates/rpc/Cargo.toml index 9276384b..84ebde8a 100644 --- a/crates/rpc/Cargo.toml +++ b/crates/rpc/Cargo.toml @@ -23,6 +23,7 @@ reth-rpc-convert.workspace = true reth-rpc-eth-api.workspace = true reth-rpc-eth-types.workspace = true reth-rpc-server-types.workspace = true +reth-revm.workspace = true reth-storage-api.workspace = true # alloy diff --git a/crates/rpc/src/eth.rs b/crates/rpc/src/eth.rs index 8a6fb3c0..0a6bfdfe 100644 --- a/crates/rpc/src/eth.rs +++ b/crates/rpc/src/eth.rs @@ -22,14 +22,15 @@ use op_alloy_rpc_types::OpTransactionRequest; use reth_chain_state::CanonStateSubscriptions; use reth_optimism_primitives::OpPrimitives; use reth_optimism_rpc::eth::OpEthApi; -use reth_primitives_traits::{BlockBody, NodePrimitives, SignerRecoverable}; +use reth_primitives_traits::{BlockBody, NodePrimitives, SealedHeaderFor, SignerRecoverable}; +use reth_revm::{database::StateProviderDatabase, db::State}; use reth_rpc::eth::EthFilter; use reth_rpc_convert::RpcTransaction; use reth_rpc_eth_api::{ helpers::{EthBlocks, EthCall, EthState, EthTransactions, FullEthApi}, EthApiServer, EthApiTypes, RpcBlock, RpcNodeCore, RpcReceipt, }; -use reth_rpc_eth_types::{block::convert_transaction_receipt, EthApiError}; +use reth_rpc_eth_types::{block::convert_transaction_receipt, error::FromEvmError, EthApiError}; use reth_rpc_server_types::result::ToRpcResult; use reth_storage_api::{StateProvider, StateProviderBox, StateProviderFactory}; @@ -463,7 +464,21 @@ where block_overrides: Option>, ) -> RpcResult { trace!(target: "rpc::eth", ?transaction, ?block_number, ?state_overrides, ?block_overrides, "Serving eth_call"); - // Phase 1: delegate to eth_api. Phase 2 enhancement: merge flashblock state overrides. + if let Some((state, header)) = self.get_flashblock_state_provider_by_id(block_number)? { + let evm_env = + EthState::evm_env_for_header(&self.eth_api, &header).map_err(Into::into)?; + let mut db = State::builder().with_database(StateProviderDatabase::new(state)).build(); + let (evm_env, tx_env) = EthCall::prepare_call_env( + &self.eth_api, + evm_env, + transaction, + &mut db, + EvmOverrides::new(state_overrides, block_overrides), + ) + .map_err(Into::into)?; + let res = EthCall::transact(&self.eth_api, db, evm_env, tx_env).map_err(Into::into)?; + return >::ensure_success(res.result).map_err(Into::into); + } self.eth_api.call(transaction, block_number, state_overrides, block_overrides).await } @@ -475,14 +490,25 @@ where overrides: Option, ) -> RpcResult { trace!(target: "rpc::eth", ?transaction, ?block_number, "Serving eth_estimateGas"); - // Phase 1: delegate to eth_api. Phase 2 enhancement: merge flashblock state overrides. + if let Some((state, header)) = self.get_flashblock_state_provider_by_id(block_number)? { + let evm_env = + EthState::evm_env_for_header(&self.eth_api, &header).map_err(Into::into)?; + return EthCall::estimate_gas_with( + &self.eth_api, + evm_env, + transaction, + state, + overrides, + ) + .map_err(Into::into); + } self.eth_api.estimate_gas(transaction, block_number, overrides).await } /// Handler for: `eth_getBalance` async fn balance(&self, address: Address, block_number: Option) -> RpcResult { trace!(target: "rpc::eth", ?address, ?block_number, "Serving eth_getBalance"); - if let Some(state) = self.get_flashblock_state_provider_by_id(block_number)? { + if let Some((state, _)) = self.get_flashblock_state_provider_by_id(block_number)? { return Ok(state.account_balance(&address).to_rpc_result()?.unwrap_or_default()); } self.eth_api.balance(address, block_number).await @@ -495,7 +521,7 @@ where block_number: Option, ) -> RpcResult { trace!(target: "rpc::eth", ?address, ?block_number, "Serving eth_getTransactionCount"); - if let Some(state) = self.get_flashblock_state_provider_by_id(block_number)? { + if let Some((state, _)) = self.get_flashblock_state_provider_by_id(block_number)? { return Ok(U256::from( state.account_nonce(&address).to_rpc_result()?.unwrap_or_default(), )); @@ -506,7 +532,7 @@ where /// Handler for: `eth_getCode` async fn get_code(&self, address: Address, block_number: Option) -> RpcResult { trace!(target: "rpc::eth", ?address, ?block_number, "Serving eth_getCode"); - if let Some(state) = self.get_flashblock_state_provider_by_id(block_number)? { + if let Some((state, _)) = self.get_flashblock_state_provider_by_id(block_number)? { return Ok(state .account_code(&address) .to_rpc_result()? @@ -524,7 +550,7 @@ where block_number: Option, ) -> RpcResult { trace!(target: "rpc::eth", ?address, ?slot, ?block_number, "Serving eth_getStorageAt"); - if let Some(state) = self.get_flashblock_state_provider_by_id(block_number)? { + if let Some((state, _)) = self.get_flashblock_state_provider_by_id(block_number)? { let storage_key = B256::new(slot.to_be_bytes()); return Ok(B256::new( state @@ -547,7 +573,7 @@ where fn get_flashblock_state_provider_by_id( &self, block_id: Option, - ) -> RpcResult> { + ) -> RpcResult)>> { let canon_state = self.eth_api.provider().latest().to_rpc_result()?; Ok(self.flashblocks_state.get_state_provider_by_id(block_id, canon_state)) } From 30ae3a1c572486fdfcd1b9e53d39e0dcd157eab8 Mon Sep 17 00:00:00 2001 From: Niven Date: Fri, 13 Mar 2026 16:23:03 +0800 Subject: [PATCH 22/76] fix(flashblocks-rpc): simplify eth_call and eth_estimateGas trait calls MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude Opus 4.6 --- crates/rpc/src/eth.rs | 39 +++++++++++++++++---------------------- 1 file changed, 17 insertions(+), 22 deletions(-) diff --git a/crates/rpc/src/eth.rs b/crates/rpc/src/eth.rs index 0a6bfdfe..265fb02a 100644 --- a/crates/rpc/src/eth.rs +++ b/crates/rpc/src/eth.rs @@ -27,7 +27,7 @@ use reth_revm::{database::StateProviderDatabase, db::State}; use reth_rpc::eth::EthFilter; use reth_rpc_convert::RpcTransaction; use reth_rpc_eth_api::{ - helpers::{EthBlocks, EthCall, EthState, EthTransactions, FullEthApi}, + helpers::{estimate::EstimateCall, EthBlocks, EthCall, EthState, EthTransactions, FullEthApi}, EthApiServer, EthApiTypes, RpcBlock, RpcNodeCore, RpcReceipt, }; use reth_rpc_eth_types::{block::convert_transaction_receipt, error::FromEvmError, EthApiError}; @@ -465,19 +465,19 @@ where ) -> RpcResult { trace!(target: "rpc::eth", ?transaction, ?block_number, ?state_overrides, ?block_overrides, "Serving eth_call"); if let Some((state, header)) = self.get_flashblock_state_provider_by_id(block_number)? { - let evm_env = - EthState::evm_env_for_header(&self.eth_api, &header).map_err(Into::into)?; + let evm_env = self.eth_api.evm_env_for_header(&header).map_err(Into::into)?; let mut db = State::builder().with_database(StateProviderDatabase::new(state)).build(); - let (evm_env, tx_env) = EthCall::prepare_call_env( - &self.eth_api, - evm_env, - transaction, - &mut db, - EvmOverrides::new(state_overrides, block_overrides), - ) - .map_err(Into::into)?; + let (evm_env, tx_env) = self + .eth_api + .prepare_call_env( + evm_env, + transaction, + &mut db, + EvmOverrides::new(state_overrides, block_overrides), + ) + .map_err(Into::into)?; let res = EthCall::transact(&self.eth_api, db, evm_env, tx_env).map_err(Into::into)?; - return >::ensure_success(res.result).map_err(Into::into); + return ::Error::ensure_success(res.result).map_err(Into::into); } self.eth_api.call(transaction, block_number, state_overrides, block_overrides).await } @@ -491,16 +491,11 @@ where ) -> RpcResult { trace!(target: "rpc::eth", ?transaction, ?block_number, "Serving eth_estimateGas"); if let Some((state, header)) = self.get_flashblock_state_provider_by_id(block_number)? { - let evm_env = - EthState::evm_env_for_header(&self.eth_api, &header).map_err(Into::into)?; - return EthCall::estimate_gas_with( - &self.eth_api, - evm_env, - transaction, - state, - overrides, - ) - .map_err(Into::into); + let evm_env = self.eth_api.evm_env_for_header(&header).map_err(Into::into)?; + return self + .eth_api + .estimate_gas_with(evm_env, transaction, state, overrides) + .map_err(Into::into); } self.eth_api.estimate_gas(transaction, block_number, overrides).await } From b41d65b6357a2214c0bef7a6f4970ba0d5e29f2d Mon Sep 17 00:00:00 2001 From: Niven Date: Fri, 13 Mar 2026 17:49:26 +0800 Subject: [PATCH 23/76] feat(flashblocks-rpc): support raw flashblocks payload cache MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude Opus 4.6 --- crates/flashblocks/src/cache/raw.rs | 1960 ++------------------------- 1 file changed, 116 insertions(+), 1844 deletions(-) diff --git a/crates/flashblocks/src/cache/raw.rs b/crates/flashblocks/src/cache/raw.rs index d38d0f75..5aa5e754 100644 --- a/crates/flashblocks/src/cache/raw.rs +++ b/crates/flashblocks/src/cache/raw.rs @@ -1,5 +1,5 @@ -use crate::execution::BuildArgs; use parking_lot::RwLock; +use ringbuffer::{AllocRingBuffer, RingBuffer}; use std::{collections::BTreeMap, sync::Arc}; use tracing::*; @@ -8,103 +8,170 @@ use alloy_primitives::B256; use alloy_rpc_types_engine::PayloadId; use op_alloy_rpc_types_engine::OpFlashblockPayload; -use reth_primitives_traits::{ - transaction::TxHashRef, NodePrimitives, Recovered, SignedTransaction, -}; +use reth_primitives_traits::{transaction::TxHashRef, Recovered, SignedTransaction}; + +const MAX_RAW_CACHE_SIZE: usize = 10; + +/// The raw flashblocks sequence cache for new incoming flashblocks from the sequencer. +/// The cache accumulates last two flashblocks sequences in memory, to handle scenario +/// when flashblocks received are out-of-order, and committing the previous sequence +/// state to the state cache is not yet possible due to parent hash mismatch (we still +/// need the previous flashblocks sequence to compute the state root). +/// +/// The raw cache is used to: +/// 1. Track the next best sequence to build, based on cache state (consecutive flashblocks +/// required) +/// 2. Re-org detection when a new flashblock is received +pub struct RawFlashblocksCache { + inner: Arc>>, +} + +impl RawFlashblocksCache { + pub fn new() -> Self { + let inner = Arc::new(RwLock::new(RawFlashblocksCacheInner::new())); + Self { inner } + } + + pub fn handle_canonical_height(&mut self, height: u64) { + self.inner.write().handle_canonical_height(height); + } + + pub fn handle_flashblock(&mut self, flashblock: OpFlashblockPayload) -> eyre::Result<()> { + self.inner.write().handle_flashblock(flashblock) + } +} + +#[derive(Debug, Clone)] +pub struct RawFlashblocksCacheInner { + cache: AllocRingBuffer>, + canon_height: u64, +} + +impl RawFlashblocksCacheInner { + fn new() -> Self { + Self { cache: AllocRingBuffer::new(MAX_RAW_CACHE_SIZE), canon_height: 0 } + } + + pub fn handle_canonical_height(&mut self, height: u64) { + self.canon_height = height; + // Evict entries from the front (oldest) whose block number is at or + // below the new canonical height. + while self + .cache + .front() + .is_some_and(|entry| entry.block_number().is_some_and(|n| n <= height)) + { + self.cache.dequeue(); + } + } + + pub fn handle_flashblock(&mut self, flashblock: OpFlashblockPayload) -> eyre::Result<()> { + if flashblock.block_number() <= self.canon_height { + debug!( + target: "flashblocks", + flashblock_number = flashblock.block_number(), + canon_height = self.canon_height, + "Received old flashblock behind canonical height, skip adding", + ); + return Ok(()); + } + + // Search for an existing entry matching this payload_id. + let existing = + self.cache.iter_mut().find(|entry| entry.payload_id() == Some(flashblock.payload_id)); + + if let Some(entry) = existing { + entry.insert_flashblock(flashblock)?; + } else { + // New sequence — push to ring buffer, evicting the oldest entry + // when the cache is full. + let mut entry = RawFlashblocksEntry::new(); + entry.insert_flashblock(flashblock)?; + self.cache.push(entry); + } + Ok(()) + } +} /// Raw flashblocks sequence keeps track of the flashblocks sequence based on their /// `payload_id`. -#[derive(Debug)] -struct RawFlashblocksSequence { +#[derive(Debug, Clone)] +struct RawFlashblocksEntry { /// Tracks the individual flashblocks in order - inner: BTreeMap, + payloads: BTreeMap, /// Tracks the recovered transactions by index recovered_transactions_by_index: BTreeMap>>>, /// Tracks if the accumulated sequence has received the first base flashblock has_base: bool, - /// Tracks the revision of the sequence - revision: u64, - /// Tracks the revision that has been applied to the state cache - applied_revision: Option, } -impl RawFlashblocksSequence { +impl RawFlashblocksEntry { fn new() -> Self { Self { - inner: BTreeMap::new(), + payloads: BTreeMap::new(), recovered_transactions_by_index: BTreeMap::new(), has_base: false, - revision: 0, - applied_revision: None, } } /// Inserts a flashblock into the sequence. fn insert_flashblock(&mut self, flashblock: OpFlashblockPayload) -> eyre::Result<()> { if !self.can_accept(&flashblock) { - return Err(eyre::eyre!("flashblock does not match current sequence id")); + warn!( + target: "flashblocks", + incoming_id = ?flashblock.payload_id, + current_id = ?self.payload_id(), + incoming_height = %flashblock.block_number(), + current_height = ?self.block_number(), + "Incoming flashblock failed to be accepted into the sequence, possible re-org detected", + ); + return Err(eyre::eyre!("incoming flashblock failed to be accepted into the sequence, possible re-org detected")); } if flashblock.index == 0 { - // Base flashblock received self.has_base = true; } - - // Only recover transactions once we've validated that this flashblock is accepted. let flashblock_index = flashblock.index; let recovered_txs = flashblock.recover_transactions().collect::, _>>()?; - self.inner.insert(flashblock_index, flashblock); + self.payloads.insert(flashblock_index, flashblock); self.recovered_transactions_by_index.insert(flashblock_index, recovered_txs); - self.bump_revision(); Ok(()) } /// Returns whether this flashblock would be accepted into the current sequence. fn can_accept(&self, flashblock: &OpFlashblockPayload) -> bool { - if flashblock.index == 0 && !self.has_base { + if self.payloads.is_empty() { return true; } - return self.block_number() == Some(flashblock.block_number()) - && self.payload_id() == Some(flashblock.payload_id); + self.block_number() == Some(flashblock.block_number()) + && self.payload_id() == Some(flashblock.payload_id) + && self.payloads.get(&flashblock.index).is_none() } - /// Returns the first block number - pub fn block_number(&self) -> Option { - Some(self.inner.values().next()?.block_number()) - } - - /// Returns the payload id of the first tracked flashblock in the current sequence. - pub fn payload_id(&self) -> Option { - Some(self.inner.values().next()?.payload_id) - } - - fn count(&self) -> usize { - self.inner.len() - } - - const fn revision(&self) -> u64 { - self.revision - } - - fn bump_revision(&mut self) { - // Iterate over the inner map and increment the revision for consecutive flashblocks + fn get_best_revision(&self) -> Option { + if !self.has_base || self.payloads.is_empty() { + return None; + } let mut new_revision = 0; - for (index, _) in self.inner.iter() { + for (index, _) in self.payloads.iter() { if *index == 0 { continue; } - // If the index is not consecutive, break the loop if new_revision != *index - 1 { break; } new_revision = *index; } - self.revision = new_revision; + Some(new_revision) } - const fn mark_revision_applied(&mut self, revision: u64) { - self.applied_revision = Some(revision); + pub fn block_number(&self) -> Option { + Some(self.payloads.values().next()?.block_number()) + } + + pub fn payload_id(&self) -> Option { + Some(self.payloads.values().next()?.payload_id) } fn transactions(&self) -> Vec>> { @@ -120,1798 +187,3 @@ impl RawFlashblocksSequence { self.recovered_transactions_by_index.values().map(Vec::len).sum() } } - -type RawFlashblocksCacheInner = - BTreeMap>; - -/// The raw flashblocks sequence cache for new incoming flashblocks from the sequencer. -/// The cache accumulates last two flashblocks sequences in memory, to handle scenario -/// when flashblocks received are out-of-order, and committing the previous sequence -/// state to the state cache is not yet possible due to parent hash mismatch (we still -/// need the previous flashblocks sequence to compute the state root). -/// -/// The raw cache is used to: -/// 1. Track the next best sequence to build, based on cache state (consecutive flashblocks -/// required) -/// 2. Re-org detection when a new flashblock is received -pub struct RawFlashblocksCache { - inner: Arc>>, -} - -impl RawFlashblocksCache { - /// Gets the next buildable sequence from the cache, returns None if no buildable - /// sequence is found. - pub(crate) fn next_buildable_args>( - &mut self, - local_tip_hash: B256, - local_tip_timestamp: u64, - ) -> Option>>, N>> { - // Try to find a buildable sequence: (ticket, base, last_fb, transactions, - // cached_state, source_name, pending_parent) - let (ticket, base, last_flashblock, transactions, cached_state, source_name, pending_parent) = - // Priority 1: Try current pending sequence (canonical mode) - if let Some(base) = self.pending.sequence.payload_base().filter(|b| b.parent_hash == local_tip_hash) { - let revision = self.pending.revision(); - if self.pending.is_revision_applied(revision) { - trace!( - target: "flashblocks", - block_number = base.block_number, - revision, - parent_hash = ?base.parent_hash, - "Skipping rebuild for already-applied pending revision" - ); - return None; - } - let sequence_id = SequenceId::from_pending(self.pending.sequence())?; - let ticket = BuildTicket::pending(sequence_id, revision); - let cached_state = self.pending.sequence.take_cached_reads().map(|r| (base.parent_hash, r)); - let last_fb = self.pending.sequence.last_flashblock()?; - let transactions = self.pending.transactions(); - (ticket, base, last_fb, transactions, cached_state, "pending", None) - } - // Priority 2: Try cached sequence with exact parent match (canonical mode) - else if let Some((cached, txs)) = self.newest_unexecuted_cached_for_parent(local_tip_hash) { - let sequence_id = SequenceId::from_complete(cached); - let ticket = BuildTicket::cached(sequence_id); - let base = cached.payload_base().clone(); - let last_fb = cached.last(); - let transactions = txs.clone(); - let cached_state = None; - (ticket, base, last_fb, transactions, cached_state, "cached", None) - } - // Priority 3: Try speculative building with pending parent state - else if let Some(ref pending_state) = pending_parent_state { - // Check if pending sequence's parent matches the pending state's block - if let Some(base) = self.pending.sequence.payload_base().filter(|b| b.parent_hash == pending_state.block_hash) { - let revision = self.pending.revision(); - if self.pending.is_revision_applied(revision) { - trace!( - target: "flashblocks", - block_number = base.block_number, - revision, - speculative_parent = ?pending_state.block_hash, - "Skipping speculative rebuild for already-applied pending revision" - ); - return None; - } - let sequence_id = SequenceId::from_pending(self.pending.sequence())?; - let ticket = BuildTicket::pending(sequence_id, revision); - let cached_state = self.pending.sequence.take_cached_reads().map(|r| (base.parent_hash, r)); - let last_fb = self.pending.sequence.last_flashblock()?; - let transactions = self.pending.transactions(); - ( - ticket, - base, - last_fb, - transactions, - cached_state, - "speculative-pending", - pending_parent_state, - ) - } - // Check cached sequences - else if let Some((cached, txs)) = self.newest_unexecuted_cached_for_parent(pending_state.block_hash) { - let sequence_id = SequenceId::from_complete(cached); - let ticket = BuildTicket::cached(sequence_id); - let base = cached.payload_base().clone(); - let last_fb = cached.last(); - let transactions = txs.clone(); - let cached_state = None; - ( - ticket, - base, - last_fb, - transactions, - cached_state, - "speculative-cached", - pending_parent_state, - ) - } else { - return None; - } - } else { - return None; - }; - - // Auto-detect when to compute state root: only if the builder didn't provide it (sent - // B256::ZERO) and we're near the expected final flashblock index. - // - // Background: Each block period receives multiple flashblocks at regular intervals. - // The sequencer sends an initial "base" flashblock at index 0 when a new block starts, - // then subsequent flashblocks are produced every FLASHBLOCK_BLOCK_TIME intervals (200ms). - // - // Examples with different block times: - // - Base (2s blocks): expect 2000ms / 200ms = 10 intervals → Flashblocks: index 0 (base) - // + indices 1-10 = potentially 11 total - // - // - Unichain (1s blocks): expect 1000ms / 200ms = 5 intervals → Flashblocks: index 0 (base) - // + indices 1-5 = potentially 6 total - // - // Why compute at N-1 instead of N: - // 1. Timing variance in flashblock producing time may mean only N flashblocks were produced - // instead of N+1 (missing the final one). Computing at N-1 ensures we get the state root - // for most common cases. - // - // 2. The +1 case (index 0 base + N intervals): If all N+1 flashblocks do arrive, we'll - // still calculate state root for flashblock N, which sacrifices a little performance but - // still ensures correctness for common cases. - // - // Note: Pathological cases may result in fewer flashblocks than expected (e.g., builder - // downtime, flashblock execution exceeding timing budget). When this occurs, we won't - // compute the state root, causing FlashblockConsensusClient to lack precomputed state for - // engine_newPayload. This is safe: we still have op-node as backstop to maintain - // chain progression. - let block_time_ms = base.timestamp.saturating_sub(local_tip_timestamp) * 1000; - let expected_final_flashblock = block_time_ms / FLASHBLOCK_BLOCK_TIME; - let compute_state_root = self.compute_state_root - && last_flashblock.diff.state_root.is_zero() - && last_flashblock.index >= expected_final_flashblock.saturating_sub(1); - - trace!( - target: "flashblocks", - block_number = base.block_number, - source = source_name, - ticket = ?ticket, - flashblock_index = last_flashblock.index, - expected_final_flashblock, - compute_state_root_enabled = self.compute_state_root, - state_root_is_zero = last_flashblock.diff.state_root.is_zero(), - will_compute_state_root = compute_state_root, - is_speculative = pending_parent.is_some(), - "Building from flashblock sequence" - ); - - Some(BuildCandidate { - ticket, - args: BuildArgs { - base, - transactions, - cached_state, - last_flashblock_index: last_flashblock.index, - last_flashblock_hash: last_flashblock.diff.block_hash, - compute_state_root, - pending_parent, - }, - }) - } - - /// Records the result of building a sequence and re-broadcasts with execution outcome. - /// - /// Updates execution outcome and cached reads. For cached sequences (already broadcast - /// once during finalize), this broadcasts again with the computed `state_root`, allowing - /// the consensus client to submit via `engine_newPayload`. - pub(crate) fn on_build_complete( - &mut self, - ticket: BuildTicket, - result: Option<(PendingFlashBlock, CachedReads)>, - ) -> BuildApplyOutcome { - let Some((computed_block, cached_reads)) = result else { - return BuildApplyOutcome::SkippedNoBuildResult; - }; - - // Extract execution outcome - let execution_outcome = computed_block.computed_state_root().map(|state_root| { - SequenceExecutionOutcome { block_hash: computed_block.block().hash(), state_root } - }); - - let outcome = self.apply_build_outcome(ticket, execution_outcome, cached_reads); - match outcome { - BuildApplyOutcome::SkippedNoBuildResult | BuildApplyOutcome::AppliedPending => {} - BuildApplyOutcome::AppliedCached { rebroadcasted } => { - trace!( - target: "flashblocks", - ticket = ?ticket, - rebroadcasted, - "Applied cached build completion" - ); - } - BuildApplyOutcome::RejectedPendingSequenceMismatch { - ticket_sequence_id, - current_sequence_id, - } => { - trace!( - target: "flashblocks", - ticket = ?ticket, - ?ticket_sequence_id, - ?current_sequence_id, - "Rejected build completion: pending sequence mismatch" - ); - } - BuildApplyOutcome::RejectedPendingRevisionStale { - sequence_id, - ticket_revision, - current_revision, - } => { - trace!( - target: "flashblocks", - ticket = ?ticket, - ?sequence_id, - ticket_revision, - current_revision, - "Rejected build completion: pending revision stale" - ); - } - BuildApplyOutcome::RejectedCachedSequenceMissing { sequence_id } => { - trace!( - target: "flashblocks", - ticket = ?ticket, - ?sequence_id, - "Rejected build completion: cached sequence missing" - ); - } - } - outcome - } - - /// Applies build output to the exact sequence targeted by the build job. - /// - /// Returns the apply outcome with explicit rejection reasons for observability. - fn apply_build_outcome( - &mut self, - ticket: BuildTicket, - execution_outcome: Option, - cached_reads: CachedReads, - ) -> BuildApplyOutcome { - match ticket.snapshot { - SequenceSnapshot::Pending { revision } => { - let current_sequence_id = SequenceId::from_pending(self.pending.sequence()); - if current_sequence_id != Some(ticket.sequence_id) { - return BuildApplyOutcome::RejectedPendingSequenceMismatch { - ticket_sequence_id: ticket.sequence_id, - current_sequence_id, - }; - } - - let current_revision = self.pending.revision(); - if current_revision != revision { - return BuildApplyOutcome::RejectedPendingRevisionStale { - sequence_id: ticket.sequence_id, - ticket_revision: revision, - current_revision, - }; - } - - { - self.pending.sequence.set_execution_outcome(execution_outcome); - self.pending.sequence.set_cached_reads(cached_reads); - self.pending.mark_revision_applied(current_revision); - trace!( - target: "flashblocks", - block_number = self.pending.sequence.block_number(), - ticket = ?ticket, - has_computed_state_root = execution_outcome.is_some(), - "Updated pending sequence with build results" - ); - } - BuildApplyOutcome::AppliedPending - } - SequenceSnapshot::Cached => { - if let Some((cached, _)) = self.cached_entry_mut_by_id(ticket.sequence_id) { - let (needs_rebroadcast, rebroadcast_sequence) = { - // Only re-broadcast if we computed new information (state_root was - // missing). If sequencer already provided - // state_root, we already broadcast in - // insert_flashblock, so skip re-broadcast to avoid duplicate FCU calls. - let needs_rebroadcast = - execution_outcome.is_some() && cached.execution_outcome().is_none(); - - cached.set_execution_outcome(execution_outcome); - - let rebroadcast_sequence = needs_rebroadcast.then_some(cached.clone()); - (needs_rebroadcast, rebroadcast_sequence) - }; - self.applied_cached_sequences.insert(ticket.sequence_id); - - if let Some(sequence) = rebroadcast_sequence - && self.block_broadcaster.receiver_count() > 0 - { - trace!( - target: "flashblocks", - block_number = sequence.block_number(), - ticket = ?ticket, - "Re-broadcasting sequence with computed state_root" - ); - let _ = self.block_broadcaster.send(sequence); - } - BuildApplyOutcome::AppliedCached { rebroadcasted: needs_rebroadcast } - } else { - BuildApplyOutcome::RejectedCachedSequenceMissing { - sequence_id: ticket.sequence_id, - } - } - } - } - } - - /// Returns the earliest block number in the pending or cached sequences. - pub(crate) fn earliest_block_number(&self) -> Option { - match (self.pending.sequence.block_number(), self.cached_min_block_number) { - (Some(pending_block), Some(cache_min)) => Some(cache_min.min(pending_block)), - (Some(pending_block), None) => Some(pending_block), - (None, Some(cache_min)) => Some(cache_min), - (None, None) => None, - } - } - - /// Returns the latest block number in the pending or cached sequences. - pub(crate) fn latest_block_number(&self) -> Option { - // Pending is always the latest if it exists - if let Some(pending_block) = self.pending.sequence.block_number() { - return Some(pending_block); - } - - // Fall back to cache - self.completed_cache.iter().map(|(seq, _)| seq.block_number()).max() - } - - /// Returns the tracked block fingerprint for the given block number from pending or cached - /// sequences, if available. - fn tracked_fingerprint_for_block(&self, block_number: u64) -> Option { - // Check pending sequence - if self.pending.sequence.block_number() == Some(block_number) { - let base = self.pending.sequence.payload_base()?; - let last_flashblock = self.pending.sequence.last_flashblock()?; - let tx_hashes = self.pending.tx_hashes(); - return Some(TrackedBlockFingerprint { - block_number, - block_hash: last_flashblock.diff.block_hash, - parent_hash: base.parent_hash, - tx_hashes, - }); - } - - // Check cached sequences (newest first). Multiple payload variants for the same block - // number can coexist in cache; reorg checks must use the newest tracked variant. - for (seq, txs) in self.completed_cache.iter().rev() { - if seq.block_number() == block_number { - let tx_hashes = txs.iter().map(|tx| *tx.tx_hash()).collect(); - return Some(TrackedBlockFingerprint { - block_number, - block_hash: seq.last().diff.block_hash, - parent_hash: seq.payload_base().parent_hash, - tx_hashes, - }); - } - } - - None - } - - /// Processes a canonical block and reconciles pending state. - /// - /// This method determines how to handle the pending flashblock state when a new - /// canonical block arrives. It uses the [`CanonicalBlockReconciler`] to decide - /// the appropriate strategy based on: - /// - Whether canonical has caught up to pending - /// - Whether a reorg was detected (transaction mismatch) - /// - Whether pending is too far ahead of canonical - /// - /// Returns the reconciliation strategy that was applied. - pub(crate) fn process_canonical_block( - &mut self, - canonical: CanonicalBlockFingerprint, - max_depth: u64, - ) -> ReconciliationStrategy { - let canonical_block_number = canonical.block_number; - let earliest = self.earliest_block_number(); - let latest = self.latest_block_number(); - - // Only run reorg detection if we actually track the canonical block number. - let reorg_detected = self - .tracked_fingerprint_for_block(canonical_block_number) - .map(|tracked| ReorgDetector::detect(&tracked, &canonical).is_reorg()) - .unwrap_or(false); - - // Determine reconciliation strategy - let strategy = CanonicalBlockReconciler::reconcile( - earliest, - latest, - canonical_block_number, - max_depth, - reorg_detected, - ); - - match &strategy { - ReconciliationStrategy::CatchUp => { - trace!( - target: "flashblocks", - ?latest, - canonical_block_number, - "Canonical caught up - clearing pending state" - ); - self.clear_all(); - } - ReconciliationStrategy::HandleReorg => { - warn!( - target: "flashblocks", - canonical_block_number, - canonical_tx_count = canonical.tx_hashes.len(), - canonical_parent_hash = ?canonical.parent_hash, - canonical_block_hash = ?canonical.block_hash, - "Reorg detected - clearing pending state" - ); - self.clear_all(); - } - ReconciliationStrategy::DepthLimitExceeded { depth, max_depth } => { - trace!( - target: "flashblocks", - depth, - max_depth, - "Depth limit exceeded - clearing pending state" - ); - self.clear_all(); - } - ReconciliationStrategy::Continue => { - trace!( - target: "flashblocks", - ?earliest, - ?latest, - canonical_block_number, - "Canonical behind pending - continuing" - ); - } - ReconciliationStrategy::NoPendingState => { - trace!( - target: "flashblocks", - canonical_block_number, - "No pending state to reconcile" - ); - } - } - - strategy - } - - /// Clears all pending and cached state. - fn clear_all(&mut self) { - self.pending.clear(); - self.completed_cache.clear(); - self.applied_cached_sequences.clear(); - self.cached_min_block_number = None; - } - - #[cfg(test)] - fn pending_transaction_count(&self) -> usize { - self.pending.transaction_count() - } -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::{ - test_utils::TestFlashBlockFactory, - validation::{CanonicalBlockFingerprint, ReconciliationStrategy}, - }; - use alloy_primitives::B256; - use alloy_rpc_types_engine::PayloadId; - use op_alloy_consensus::OpTxEnvelope; - use reth_optimism_primitives::OpPrimitives; - - fn canonical_for( - manager: &SequenceManager, - block_number: u64, - tx_hashes: Vec, - ) -> CanonicalBlockFingerprint { - if let Some(tracked) = manager.tracked_fingerprint_for_block(block_number) { - CanonicalBlockFingerprint { - block_number, - block_hash: tracked.block_hash, - parent_hash: tracked.parent_hash, - tx_hashes, - } - } else { - CanonicalBlockFingerprint { - block_number, - block_hash: B256::repeat_byte(0xFE), - parent_hash: B256::repeat_byte(0xFD), - tx_hashes, - } - } - } - - #[test] - fn test_sequence_manager_new() { - let manager: SequenceManager = SequenceManager::new(true); - assert_eq!(manager.pending().count(), 0); - } - - #[test] - fn test_insert_flashblock_creates_pending_sequence() { - let mut manager: SequenceManager = SequenceManager::new(true); - let factory = TestFlashBlockFactory::new(); - - let fb0 = factory.flashblock_at(0).build(); - manager.insert_flashblock(fb0).unwrap(); - - assert_eq!(manager.pending().count(), 1); - assert_eq!(manager.pending().block_number(), Some(100)); - } - - #[test] - fn test_insert_flashblock_caches_completed_sequence() { - let mut manager: SequenceManager = SequenceManager::new(true); - let factory = TestFlashBlockFactory::new(); - - // Build first sequence - let fb0 = factory.flashblock_at(0).build(); - manager.insert_flashblock(fb0.clone()).unwrap(); - - let fb1 = factory.flashblock_after(&fb0).build(); - manager.insert_flashblock(fb1).unwrap(); - - // Insert new base (index 0) which should finalize and cache previous sequence - let fb2 = factory.flashblock_for_next_block(&fb0).build(); - manager.insert_flashblock(fb2).unwrap(); - - // New sequence should be pending - assert_eq!(manager.pending().count(), 1); - assert_eq!(manager.pending().block_number(), Some(101)); - assert_eq!(manager.completed_cache.len(), 1); - let (cached_sequence, _txs) = manager.completed_cache.get(0).unwrap(); - assert_eq!(cached_sequence.block_number(), 100); - } - - #[test] - fn test_next_buildable_args_returns_none_when_empty() { - let mut manager: SequenceManager = SequenceManager::new(true); - let local_tip_hash = B256::random(); - let local_tip_timestamp = 1000; - - let args = - manager.next_buildable_args::(local_tip_hash, local_tip_timestamp, None); - assert!(args.is_none()); - } - - #[test] - fn test_next_buildable_args_matches_pending_parent() { - let mut manager: SequenceManager = SequenceManager::new(true); - let factory = TestFlashBlockFactory::new(); - - let fb0 = factory.flashblock_at(0).build(); - let parent_hash = fb0.base.as_ref().unwrap().parent_hash; - manager.insert_flashblock(fb0).unwrap(); - - let args = manager.next_buildable_args::(parent_hash, 1000000, None); - assert!(args.is_some()); - - let build_args = args.unwrap(); - assert_eq!(build_args.last_flashblock_index, 0); - } - - #[test] - fn test_next_buildable_args_returns_none_when_parent_mismatch() { - let mut manager: SequenceManager = SequenceManager::new(true); - let factory = TestFlashBlockFactory::new(); - - let fb0 = factory.flashblock_at(0).build(); - manager.insert_flashblock(fb0).unwrap(); - - // Use different parent hash - let wrong_parent = B256::random(); - let args = manager.next_buildable_args::(wrong_parent, 1000000, None); - assert!(args.is_none()); - } - - #[test] - fn test_next_buildable_args_prefers_pending_over_cached() { - let mut manager: SequenceManager = SequenceManager::new(true); - let factory = TestFlashBlockFactory::new(); - - // Create and finalize first sequence - let fb0 = factory.flashblock_at(0).build(); - manager.insert_flashblock(fb0.clone()).unwrap(); - - // Create new sequence (finalizes previous) - let fb1 = factory.flashblock_for_next_block(&fb0).build(); - let parent_hash = fb1.base.as_ref().unwrap().parent_hash; - manager.insert_flashblock(fb1).unwrap(); - - // Request with first sequence's parent (should find cached) - let args = manager.next_buildable_args::(parent_hash, 1000000, None); - assert!(args.is_some()); - } - - #[test] - fn test_next_buildable_args_finds_cached_sequence() { - let mut manager: SequenceManager = SequenceManager::new(true); - let factory = TestFlashBlockFactory::new(); - - // Build and cache first sequence - let fb0 = factory.flashblock_at(0).build(); - let parent_hash = fb0.base.as_ref().unwrap().parent_hash; - manager.insert_flashblock(fb0.clone()).unwrap(); - - // Start new sequence to finalize first - let fb1 = factory.flashblock_for_next_block(&fb0).build(); - manager.insert_flashblock(fb1.clone()).unwrap(); - - // Clear pending by starting another sequence - let fb2 = factory.flashblock_for_next_block(&fb1).build(); - manager.insert_flashblock(fb2).unwrap(); - - // Request first sequence's parent - should find in cache - let args = manager.next_buildable_args::(parent_hash, 1000000, None); - assert!(args.is_some()); - } - - #[test] - fn test_next_buildable_args_uses_newest_cached_when_parent_hash_shared() { - let mut manager: SequenceManager = SequenceManager::new(true); - let factory = TestFlashBlockFactory::new(); - - let shared_parent = B256::repeat_byte(0x44); - let payload_a = PayloadId::new([0xAA; 8]); - let payload_b = PayloadId::new([0xBB; 8]); - - // Sequence A for block 100 (will become cached first). - let fb_a0 = factory - .flashblock_at(0) - .block_number(100) - .parent_hash(shared_parent) - .payload_id(payload_a) - .build(); - manager.insert_flashblock(fb_a0).unwrap(); - - // Sequence B for the same parent hash and block number (different payload id). - // Inserting index 0 finalizes/caches sequence A. - let fb_b0 = factory - .flashblock_at(0) - .block_number(100) - .parent_hash(shared_parent) - .payload_id(payload_b) - .build(); - manager.insert_flashblock(fb_b0.clone()).unwrap(); - - // Finalize/cache sequence B. - let fb_next = factory.flashblock_for_next_block(&fb_b0).build(); - manager.insert_flashblock(fb_next).unwrap(); - - let candidate = manager - .next_buildable_args::(shared_parent, 1_000_000, None) - .expect("shared parent should resolve to a cached sequence"); - - // Newest sequence (B) should be selected deterministically. - assert_eq!(candidate.ticket.sequence_id.payload_id, payload_b); - assert_eq!(candidate.last_flashblock_hash, fb_b0.diff.block_hash); - } - - #[test] - fn test_next_buildable_args_skips_executed_cached_and_advances_speculative() { - use crate::types::pending_state::PendingBlockState; - use reth_execution_types::BlockExecutionOutput; - use reth_revm::cached::CachedReads; - use std::sync::Arc; - - let mut manager: SequenceManager = SequenceManager::new(true); - let factory = TestFlashBlockFactory::new(); - - // Block 100 with three flashblocks. - let fb100_0 = factory.flashblock_at(0).build(); - let local_tip_hash = fb100_0.base.as_ref().unwrap().parent_hash; - manager.insert_flashblock(fb100_0.clone()).unwrap(); - let fb100_1 = factory.flashblock_after(&fb100_0).build(); - manager.insert_flashblock(fb100_1.clone()).unwrap(); - let fb100_2 = factory.flashblock_after(&fb100_1).build(); - manager.insert_flashblock(fb100_2.clone()).unwrap(); - - // First flashblock of block 101 finalizes block 100 into cache. - let fb101_0 = factory.flashblock_for_next_block(&fb100_2).build(); - manager.insert_flashblock(fb101_0.clone()).unwrap(); - - // First build picks canonical-attached cached block 100. - let first = manager - .next_buildable_args::(local_tip_hash, 1_000_000, None) - .expect("cached block should be buildable first"); - assert!(matches!(first.ticket.snapshot, SequenceSnapshot::Cached)); - assert_eq!(first.base.block_number, fb100_0.block_number()); - - // Mark cached block 100 as executed. - let applied = manager.apply_build_outcome( - first.ticket, - Some(SequenceExecutionOutcome { - block_hash: B256::repeat_byte(0x33), - state_root: B256::repeat_byte(0x44), - }), - CachedReads::default(), - ); - assert!(matches!( - applied, - BuildApplyOutcome::AppliedCached { rebroadcasted: true | false } - )); - - // Speculative state for block 100 should unlock block 101/index0. - let pending_state = PendingBlockState:: { - block_hash: fb101_0.base.as_ref().unwrap().parent_hash, - block_number: fb100_0.block_number(), - parent_hash: local_tip_hash, - canonical_anchor_hash: local_tip_hash, - execution_outcome: Arc::new(BlockExecutionOutput::default()), - cached_reads: CachedReads::default(), - sealed_header: None, - }; - - let second = manager - .next_buildable_args(local_tip_hash, 1_000_000, Some(pending_state)) - .expect("speculative pending block should be buildable next"); - assert!(matches!(second.ticket.snapshot, SequenceSnapshot::Pending { .. })); - assert_eq!(second.base.block_number, fb101_0.block_number()); - assert!(second.pending_parent.is_some()); - } - - #[test] - fn test_cached_sequence_with_provided_state_root_not_reselected_after_apply() { - use reth_revm::cached::CachedReads; - - let mut manager: SequenceManager = SequenceManager::new(true); - let factory = TestFlashBlockFactory::new(); - let provided_root = B256::repeat_byte(0xA5); - - // Block 100 sequence has non-zero state root from sequencer. - let fb100_0 = factory.flashblock_at(0).state_root(provided_root).build(); - let local_tip_hash = fb100_0.base.as_ref().unwrap().parent_hash; - manager.insert_flashblock(fb100_0.clone()).unwrap(); - - let fb100_1 = factory.flashblock_after(&fb100_0).state_root(provided_root).build(); - manager.insert_flashblock(fb100_1.clone()).unwrap(); - - let fb100_2 = factory.flashblock_after(&fb100_1).state_root(provided_root).build(); - manager.insert_flashblock(fb100_2.clone()).unwrap(); - - // First flashblock of block 101 finalizes block 100 into cache. - let fb101_0 = factory.flashblock_for_next_block(&fb100_2).build(); - manager.insert_flashblock(fb101_0).unwrap(); - - let candidate = manager - .next_buildable_args::(local_tip_hash, 1_000_000, None) - .expect("cached sequence should be buildable once"); - assert!(matches!(candidate.ticket.snapshot, SequenceSnapshot::Cached)); - assert!( - !candidate.compute_state_root, - "non-zero sequencer root should skip local root compute" - ); - - let applied = manager.apply_build_outcome(candidate.ticket, None, CachedReads::default()); - assert!(matches!(applied, BuildApplyOutcome::AppliedCached { rebroadcasted: false })); - - let repeated = manager.next_buildable_args::(local_tip_hash, 1_000_000, None); - assert!( - repeated.is_none(), - "cached sequence with provided state root must not be reselected after apply" - ); - } - - #[test] - fn test_delayed_canonical_allows_speculative_next_block_index_zero() { - use crate::types::pending_state::PendingBlockState; - use reth_execution_types::BlockExecutionOutput; - use reth_revm::cached::CachedReads; - use std::sync::Arc; - - let mut manager: SequenceManager = SequenceManager::new(true); - let factory = TestFlashBlockFactory::new(); - - // Canonical tip is block 9. Flashblocks for block 10 all build on block 9. - let canonical_9_hash = B256::repeat_byte(0x09); - let fb10_0 = factory - .flashblock_at(0) - .block_number(10) - .parent_hash(canonical_9_hash) - .block_hash(B256::repeat_byte(0x10)) - .build(); - manager.insert_flashblock(fb10_0.clone()).unwrap(); - - let fb10_1 = factory.flashblock_after(&fb10_0).block_hash(B256::repeat_byte(0x11)).build(); - manager.insert_flashblock(fb10_1.clone()).unwrap(); - - let fb10_2 = factory.flashblock_after(&fb10_1).block_hash(B256::repeat_byte(0x12)).build(); - manager.insert_flashblock(fb10_2.clone()).unwrap(); - - // First flashblock for block 11 arrives before canonical block 10. - let fb11_0 = - factory.flashblock_for_next_block(&fb10_2).block_hash(B256::repeat_byte(0x20)).build(); - manager.insert_flashblock(fb11_0.clone()).unwrap(); - - // Build block 10 first from canonical tip (cached canonical-attached sequence). - let block10_candidate = manager - .next_buildable_args::(canonical_9_hash, 1_000_000, None) - .expect("block 10 should be buildable from canonical tip"); - assert_eq!(block10_candidate.base.block_number, 10); - assert!(matches!(block10_candidate.ticket.snapshot, SequenceSnapshot::Cached)); - - let applied = manager.apply_build_outcome( - block10_candidate.ticket, - Some(SequenceExecutionOutcome { - block_hash: fb11_0.base.as_ref().unwrap().parent_hash, - state_root: B256::repeat_byte(0xAA), - }), - CachedReads::default(), - ); - assert!(matches!( - applied, - BuildApplyOutcome::AppliedCached { rebroadcasted: true | false } - )); - - // Speculative state produced by block 10 should unlock block 11/index 0 - // even though canonical block 10 has not arrived yet. - let pending_state_10 = PendingBlockState:: { - block_hash: fb11_0.base.as_ref().unwrap().parent_hash, - block_number: 10, - parent_hash: canonical_9_hash, - canonical_anchor_hash: canonical_9_hash, - execution_outcome: Arc::new(BlockExecutionOutput::default()), - cached_reads: CachedReads::default(), - sealed_header: None, - }; - - let before_canonical_10 = manager - .next_buildable_args(canonical_9_hash, 1_000_000, Some(pending_state_10.clone())) - .expect("block 11/index0 should be buildable speculatively before canonical block 10"); - assert_eq!(before_canonical_10.base.block_number, 11); - assert!(before_canonical_10.pending_parent.is_some()); - assert_eq!( - before_canonical_10.pending_parent.as_ref().unwrap().canonical_anchor_hash, - canonical_9_hash - ); - - // Canonical block 10 arrives later: strategy must be Continue (do not clear pending state). - let strategy = manager.process_canonical_block(canonical_for(&manager, 10, vec![]), 64); - assert_eq!(strategy, ReconciliationStrategy::Continue); - - // Block 11/index0 must remain buildable after delayed canonical block 10. - let after_canonical_10 = manager - .next_buildable_args(canonical_9_hash, 1_000_000, Some(pending_state_10)) - .expect("block 11/index0 should remain buildable after delayed canonical block 10"); - assert_eq!(after_canonical_10.base.block_number, 11); - assert!(after_canonical_10.pending_parent.is_some()); - } - - #[test] - fn test_cached_entry_lookup_is_exact_by_sequence_id() { - let mut manager: SequenceManager = SequenceManager::new(true); - let factory = TestFlashBlockFactory::new(); - - let shared_parent = B256::repeat_byte(0x55); - let payload_a = PayloadId::new([0x0A; 8]); - let payload_b = PayloadId::new([0x0B; 8]); - - let fb_a0 = factory - .flashblock_at(0) - .block_number(100) - .parent_hash(shared_parent) - .payload_id(payload_a) - .build(); - manager.insert_flashblock(fb_a0).unwrap(); - - let fb_b0 = factory - .flashblock_at(0) - .block_number(100) - .parent_hash(shared_parent) - .payload_id(payload_b) - .build(); - manager.insert_flashblock(fb_b0.clone()).unwrap(); - - // Finalize/cache sequence B. - let fb_next = factory.flashblock_for_next_block(&fb_b0).build(); - manager.insert_flashblock(fb_next).unwrap(); - - let seq_a_id = - SequenceId { block_number: 100, payload_id: payload_a, parent_hash: shared_parent }; - let seq_b_id = - SequenceId { block_number: 100, payload_id: payload_b, parent_hash: shared_parent }; - - let (seq_a, _) = manager - .cached_entry_mut_by_id(seq_a_id) - .expect("sequence A should be found by exact id"); - assert_eq!(seq_a.payload_id(), payload_a); - - let (seq_b, _) = manager - .cached_entry_mut_by_id(seq_b_id) - .expect("sequence B should be found by exact id"); - assert_eq!(seq_b.payload_id(), payload_b); - } - - #[test] - fn test_reorg_detection_uses_newest_cached_variant_for_block_number() { - let mut manager: SequenceManager = SequenceManager::new(true); - let factory = TestFlashBlockFactory::new(); - - let shared_parent = B256::repeat_byte(0x66); - let payload_a = PayloadId::new([0x1A; 8]); - let payload_b = PayloadId::new([0x1B; 8]); - - // Sequence A for block 100 (cached first). - let fb_a0 = factory - .flashblock_at(0) - .block_number(100) - .parent_hash(shared_parent) - .payload_id(payload_a) - .block_hash(B256::repeat_byte(0xA1)) - .build(); - manager.insert_flashblock(fb_a0).unwrap(); - - // Sequence B for the same block number/parent (cached second = newest). - let fb_b0 = factory - .flashblock_at(0) - .block_number(100) - .parent_hash(shared_parent) - .payload_id(payload_b) - .block_hash(B256::repeat_byte(0xB1)) - .build(); - manager.insert_flashblock(fb_b0.clone()).unwrap(); - - // Finalize/cache B and start pending block 101. - let fb_next = factory.flashblock_for_next_block(&fb_b0).build(); - manager.insert_flashblock(fb_next).unwrap(); - - let tracked = manager - .tracked_fingerprint_for_block(100) - .expect("tracked fingerprint for block 100 should exist"); - assert_eq!( - tracked.block_hash, fb_b0.diff.block_hash, - "reorg detection must use newest cached variant for a shared block number" - ); - - // Canonical matches newest variant B; this must not be treated as reorg. - let canonical = CanonicalBlockFingerprint { - block_number: 100, - block_hash: fb_b0.diff.block_hash, - parent_hash: shared_parent, - tx_hashes: tracked.tx_hashes, - }; - - let strategy = manager.process_canonical_block(canonical, 64); - assert_eq!(strategy, ReconciliationStrategy::Continue); - assert_eq!(manager.pending().block_number(), Some(101)); - assert!(!manager.completed_cache.is_empty()); - } - - #[test] - fn test_on_build_complete_ignores_unknown_sequence_id() { - let mut manager: SequenceManager = SequenceManager::new(true); - let factory = TestFlashBlockFactory::new(); - - // Build one cached sequence and one pending sequence. - let fb0 = factory.flashblock_at(0).build(); - manager.insert_flashblock(fb0.clone()).unwrap(); - let fb1 = factory.flashblock_for_next_block(&fb0).build(); - manager.insert_flashblock(fb1.clone()).unwrap(); - - assert_eq!(manager.completed_cache.len(), 1); - assert!(manager.completed_cache.get(0).unwrap().0.execution_outcome().is_none()); - - let pending_parent = manager.pending().payload_base().unwrap().parent_hash; - let before = manager - .next_buildable_args::(pending_parent, 1_000_000, None) - .expect("pending sequence should be buildable"); - assert!(before.cached_state.is_none(), "pending sequence must start without cached reads"); - - let cached = &manager.completed_cache.get(0).unwrap().0; - let stale_payload = if cached.payload_id() == PayloadId::new([0xEE; 8]) { - PayloadId::new([0xEF; 8]) - } else { - PayloadId::new([0xEE; 8]) - }; - let stale_id = SequenceId { - block_number: cached.block_number(), - payload_id: stale_payload, - parent_hash: cached.payload_base().parent_hash, - }; - let stale_ticket = BuildTicket::cached(stale_id); - - let applied = manager.apply_build_outcome( - stale_ticket, - Some(SequenceExecutionOutcome { - block_hash: B256::repeat_byte(0x11), - state_root: B256::repeat_byte(0x22), - }), - reth_revm::cached::CachedReads::default(), - ); - assert!(matches!(applied, BuildApplyOutcome::RejectedCachedSequenceMissing { .. })); - - // Unknown sequence IDs must never mutate tracked pending/cached state. - let after = manager - .next_buildable_args::(pending_parent, 1_000_000, None) - .expect("pending sequence should remain buildable"); - assert!(after.cached_state.is_none(), "stale completion must not attach cached reads"); - - // Finalize current pending sequence and ensure no synthetic execution outcome was injected. - let pending_block_number = manager.pending().block_number().unwrap(); - let fb2 = factory.flashblock_for_next_block(&fb1).build(); - manager.insert_flashblock(fb2).unwrap(); - let finalized_pending = manager - .completed_cache - .iter() - .find(|(seq, _)| seq.block_number() == pending_block_number) - .expect("pending sequence should be finalized into cache") - .0 - .clone(); - assert!(finalized_pending.execution_outcome().is_none()); - - assert!(manager.completed_cache.get(0).unwrap().0.execution_outcome().is_none()); - } - - #[test] - fn test_pending_build_ticket_rejects_stale_revision() { - let mut manager: SequenceManager = SequenceManager::new(true); - let factory = TestFlashBlockFactory::new(); - - let fb0 = factory.flashblock_at(0).build(); - let parent_hash = fb0.base.as_ref().unwrap().parent_hash; - manager.insert_flashblock(fb0.clone()).unwrap(); - - let first_candidate = manager - .next_buildable_args::(parent_hash, 1_000_000, None) - .expect("initial pending sequence should be buildable"); - let stale_ticket = first_candidate.ticket; - - // Pending sequence advances while the old build would be in-flight. - let fb1 = factory.flashblock_after(&fb0).build(); - manager.insert_flashblock(fb1.clone()).unwrap(); - - let stale_applied = manager.apply_build_outcome( - stale_ticket, - Some(SequenceExecutionOutcome { - block_hash: B256::repeat_byte(0x31), - state_root: B256::repeat_byte(0x32), - }), - reth_revm::cached::CachedReads::default(), - ); - assert!( - matches!(stale_applied, BuildApplyOutcome::RejectedPendingRevisionStale { .. }), - "stale pending ticket must be rejected" - ); - - // Fresh ticket for the current revision should still apply. - let fresh_candidate = manager - .next_buildable_args::(parent_hash, 1_000_000, None) - .expect("advanced pending sequence should remain buildable"); - assert_eq!(fresh_candidate.last_flashblock_hash, fb1.diff.block_hash); - assert!(fresh_candidate.cached_state.is_none()); - - let fresh_applied = manager.apply_build_outcome( - fresh_candidate.ticket, - Some(SequenceExecutionOutcome { - block_hash: B256::repeat_byte(0x41), - state_root: B256::repeat_byte(0x42), - }), - reth_revm::cached::CachedReads::default(), - ); - assert!(matches!(fresh_applied, BuildApplyOutcome::AppliedPending)); - - let with_same_revision = - manager.next_buildable_args::(parent_hash, 1_000_000, None); - assert!( - with_same_revision.is_none(), - "applied pending revision must not be rebuilt until sequence revision advances" - ); - - // Once pending data advances, the next revision should be buildable and use cached reads. - let fb2 = factory.flashblock_after(&fb1).build(); - manager.insert_flashblock(fb2.clone()).unwrap(); - - let with_cached_state = manager - .next_buildable_args::(parent_hash, 1_000_000, None) - .expect("pending sequence should be buildable after revision advances"); - assert_eq!(with_cached_state.last_flashblock_hash, fb2.diff.block_hash); - assert!( - with_cached_state.cached_state.is_some(), - "fresh completion should attach cached reads once pending revision advances" - ); - } - - #[test] - fn test_compute_state_root_logic_near_expected_final() { - let mut manager: SequenceManager = SequenceManager::new(true); - let block_time = 2u64; - let factory = TestFlashBlockFactory::new().with_block_time(block_time); - - // Create sequence with zero state root (needs computation) - let fb0 = factory.flashblock_at(0).state_root(B256::ZERO).build(); - let parent_hash = fb0.base.as_ref().unwrap().parent_hash; - let base_timestamp = fb0.base.as_ref().unwrap().timestamp; - manager.insert_flashblock(fb0.clone()).unwrap(); - - // Add flashblocks up to expected final index (2000ms / 200ms = 10) - for i in 1..=9 { - let fb = factory.flashblock_after(&fb0).index(i).state_root(B256::ZERO).build(); - manager.insert_flashblock(fb).unwrap(); - } - - // Request with proper timing - should compute state root for index 9 - let args = manager.next_buildable_args::( - parent_hash, - base_timestamp - block_time, - None, - ); - assert!(args.is_some()); - assert!(args.unwrap().compute_state_root); - } - - #[test] - fn test_no_compute_state_root_when_provided_by_sequencer() { - let mut manager: SequenceManager = SequenceManager::new(true); - let block_time = 2u64; - let factory = TestFlashBlockFactory::new().with_block_time(block_time); - - // Create sequence with non-zero state root (provided by sequencer) - let fb0 = factory.flashblock_at(0).state_root(B256::random()).build(); - let parent_hash = fb0.base.as_ref().unwrap().parent_hash; - let base_timestamp = fb0.base.as_ref().unwrap().timestamp; - manager.insert_flashblock(fb0).unwrap(); - - let args = manager.next_buildable_args::( - parent_hash, - base_timestamp - block_time, - None, - ); - assert!(args.is_some()); - assert!(!args.unwrap().compute_state_root); - } - - #[test] - fn test_no_compute_state_root_when_disabled() { - let mut manager: SequenceManager = SequenceManager::new(false); - let block_time = 2u64; - let factory = TestFlashBlockFactory::new().with_block_time(block_time); - - // Create sequence with zero state root (needs computation) - let fb0 = factory.flashblock_at(0).state_root(B256::ZERO).build(); - let parent_hash = fb0.base.as_ref().unwrap().parent_hash; - let base_timestamp = fb0.base.as_ref().unwrap().timestamp; - manager.insert_flashblock(fb0.clone()).unwrap(); - - // Add flashblocks up to expected final index (2000ms / 200ms = 10) - for i in 1..=9 { - let fb = factory.flashblock_after(&fb0).index(i).state_root(B256::ZERO).build(); - manager.insert_flashblock(fb).unwrap(); - } - - // Request with proper timing - should compute state root for index 9 - let args = manager.next_buildable_args::( - parent_hash, - base_timestamp - block_time, - None, - ); - assert!(args.is_some()); - assert!(!args.unwrap().compute_state_root); - } - - #[test] - fn test_compute_state_root_with_timestamp_skew_does_not_underflow() { - let mut manager: SequenceManager = SequenceManager::new(true); - let factory = TestFlashBlockFactory::new(); - - let fb0 = factory.flashblock_at(0).state_root(B256::ZERO).build(); - let parent_hash = fb0.base.as_ref().unwrap().parent_hash; - let base_timestamp = fb0.base.as_ref().unwrap().timestamp; - manager.insert_flashblock(fb0).unwrap(); - - // Local tip timestamp can be ahead briefly in skewed/out-of-order conditions. - // This should not panic due to arithmetic underflow. - let args = - manager.next_buildable_args::(parent_hash, base_timestamp + 1, None); - assert!(args.is_some()); - } - - #[test] - fn test_cache_ring_buffer_evicts_oldest() { - let mut manager: SequenceManager = SequenceManager::new(true); - let factory = TestFlashBlockFactory::new(); - - // Fill cache with 4 sequences (cache size is 3, so oldest should be evicted) - let mut last_fb = factory.flashblock_at(0).build(); - manager.insert_flashblock(last_fb.clone()).unwrap(); - - for _ in 0..3 { - last_fb = factory.flashblock_for_next_block(&last_fb).build(); - manager.insert_flashblock(last_fb.clone()).unwrap(); - } - - // The first sequence should have been evicted, so we can't build it - let first_parent = factory.flashblock_at(0).build().base.unwrap().parent_hash; - let args = manager.next_buildable_args::(first_parent, 1000000, None); - // Should not find it (evicted from ring buffer) - assert!(args.is_none()); - } - - // ==================== Canonical Block Reconciliation Tests ==================== - - #[test] - fn test_process_canonical_block_no_pending_state() { - let mut manager: SequenceManager = SequenceManager::new(true); - - // No pending state, should return NoPendingState - let canonical = canonical_for(&manager, 100, vec![]); - let strategy = manager.process_canonical_block(canonical, 10); - assert_eq!(strategy, ReconciliationStrategy::NoPendingState); - } - - #[test] - fn test_process_canonical_block_catchup() { - let mut manager: SequenceManager = SequenceManager::new(true); - let factory = TestFlashBlockFactory::new(); - - // Insert a flashblock sequence for block 100 - let fb0 = factory.flashblock_at(0).build(); - manager.insert_flashblock(fb0).unwrap(); - - assert_eq!(manager.pending().block_number(), Some(100)); - - // Canonical catches up to block 100 - let canonical = canonical_for(&manager, 100, vec![]); - let strategy = manager.process_canonical_block(canonical, 10); - assert_eq!(strategy, ReconciliationStrategy::CatchUp); - - // Pending state should be cleared - assert!(manager.pending().block_number().is_none()); - } - - #[test] - fn test_process_canonical_block_continue() { - let mut manager: SequenceManager = SequenceManager::new(true); - let factory = TestFlashBlockFactory::new(); - - // Insert flashblocks for block 100-102 - let fb0 = factory.flashblock_at(0).build(); - manager.insert_flashblock(fb0.clone()).unwrap(); - - let fb1 = factory.flashblock_for_next_block(&fb0).build(); - manager.insert_flashblock(fb1.clone()).unwrap(); - - let fb2 = factory.flashblock_for_next_block(&fb1).build(); - manager.insert_flashblock(fb2).unwrap(); - - // Canonical at 99 (behind pending) - let canonical = canonical_for(&manager, 99, vec![]); - let strategy = manager.process_canonical_block(canonical, 10); - assert_eq!(strategy, ReconciliationStrategy::Continue); - - // Pending state should still exist - assert!(manager.pending().block_number().is_some()); - } - - #[test] - fn test_process_canonical_block_depth_limit_exceeded() { - let mut manager: SequenceManager = SequenceManager::new(true); - let factory = TestFlashBlockFactory::new(); - - // Insert flashblocks for block 100-102 - let fb0 = factory.flashblock_at(0).build(); - manager.insert_flashblock(fb0.clone()).unwrap(); - - let fb1 = factory.flashblock_for_next_block(&fb0).build(); - manager.insert_flashblock(fb1.clone()).unwrap(); - - let fb2 = factory.flashblock_for_next_block(&fb1).build(); - manager.insert_flashblock(fb2).unwrap(); - - // At this point: earliest=100, latest=102 - // Canonical at 105 with max_depth of 2 (depth = 105 - 100 = 5, which exceeds 2) - // But wait - if canonical >= latest, it's CatchUp. So canonical must be < latest (102). - // Let's use canonical=101, which is < 102 but depth = 101 - 100 = 1 > 0 - let canonical = canonical_for(&manager, 101, vec![]); - let strategy = manager.process_canonical_block(canonical, 0); - assert!(matches!(strategy, ReconciliationStrategy::DepthLimitExceeded { .. })); - - // Pending state should be cleared - assert!(manager.pending().block_number().is_none()); - } - - #[test] - fn test_earliest_and_latest_block_numbers() { - let mut manager: SequenceManager = SequenceManager::new(true); - let factory = TestFlashBlockFactory::new(); - - // Initially no blocks - assert!(manager.earliest_block_number().is_none()); - assert!(manager.latest_block_number().is_none()); - - // Insert first flashblock (block 100) - let fb0 = factory.flashblock_at(0).build(); - manager.insert_flashblock(fb0.clone()).unwrap(); - - assert_eq!(manager.earliest_block_number(), Some(100)); - assert_eq!(manager.latest_block_number(), Some(100)); - - // Insert next block (block 101) - this caches block 100 - let fb1 = factory.flashblock_for_next_block(&fb0).build(); - manager.insert_flashblock(fb1.clone()).unwrap(); - - assert_eq!(manager.earliest_block_number(), Some(100)); - assert_eq!(manager.latest_block_number(), Some(101)); - - // Insert another block (block 102) - let fb2 = factory.flashblock_for_next_block(&fb1).build(); - manager.insert_flashblock(fb2).unwrap(); - - assert_eq!(manager.earliest_block_number(), Some(100)); - assert_eq!(manager.latest_block_number(), Some(102)); - } - - #[test] - fn test_earliest_block_number_tracks_cache_rollover() { - let mut manager: SequenceManager = SequenceManager::new(true); - let factory = TestFlashBlockFactory::new(); - - let fb0 = factory.flashblock_at(0).build(); - manager.insert_flashblock(fb0.clone()).unwrap(); - - let fb1 = factory.flashblock_for_next_block(&fb0).build(); - manager.insert_flashblock(fb1.clone()).unwrap(); - - let fb2 = factory.flashblock_for_next_block(&fb1).build(); - manager.insert_flashblock(fb2.clone()).unwrap(); - - let fb3 = factory.flashblock_for_next_block(&fb2).build(); - manager.insert_flashblock(fb3.clone()).unwrap(); - - let fb4 = factory.flashblock_for_next_block(&fb3).build(); - manager.insert_flashblock(fb4).unwrap(); - - // Cache size is 3, so block 100 should have been evicted. - assert_eq!(manager.earliest_block_number(), Some(101)); - assert_eq!(manager.latest_block_number(), Some(104)); - } - - // ==================== Speculative Building Tests ==================== - - #[test] - fn test_speculative_build_with_pending_parent_state() { - use crate::types::pending_state::PendingBlockState; - use reth_execution_types::BlockExecutionOutput; - use reth_revm::cached::CachedReads; - use std::sync::Arc; - - let mut manager: SequenceManager = SequenceManager::new(true); - let factory = TestFlashBlockFactory::new(); - - // Create a flashblock for block 101 - let fb0 = factory.flashblock_at(0).block_number(101).build(); - // The parent_hash of block 101 should be the hash of block 100 - let block_100_hash = fb0.base.as_ref().unwrap().parent_hash; - manager.insert_flashblock(fb0).unwrap(); - - // Local tip is block 99 (not matching block 100's hash) - let local_tip_hash = B256::random(); - - // Without pending parent state, no args should be returned - let args = manager.next_buildable_args::(local_tip_hash, 1000000, None); - assert!(args.is_none()); - - // Create pending parent state for block 100 (its block_hash matches fb0's parent_hash) - let parent_hash = B256::random(); - let pending_state: PendingBlockState = PendingBlockState { - block_hash: block_100_hash, - block_number: 100, - parent_hash, - canonical_anchor_hash: parent_hash, - execution_outcome: Arc::new(BlockExecutionOutput::default()), - cached_reads: CachedReads::default(), - sealed_header: None, - }; - - // With pending parent state, should return args for speculative building - let args = manager.next_buildable_args(local_tip_hash, 1000000, Some(pending_state)); - assert!(args.is_some()); - let build_args = args.unwrap(); - assert!(build_args.pending_parent.is_some()); - assert_eq!(build_args.pending_parent.as_ref().unwrap().block_number, 100); - } - - #[test] - fn test_speculative_build_uses_cached_sequence() { - use crate::types::pending_state::PendingBlockState; - use reth_execution_types::BlockExecutionOutput; - use reth_revm::cached::CachedReads; - use std::sync::Arc; - - let mut manager: SequenceManager = SequenceManager::new(true); - let factory = TestFlashBlockFactory::new(); - - // Create and cache first sequence for block 100 - let fb0 = factory.flashblock_at(0).build(); - let block_99_hash = fb0.base.as_ref().unwrap().parent_hash; - manager.insert_flashblock(fb0.clone()).unwrap(); - - // Create second sequence for block 101 (this caches block 100) - let fb1 = factory.flashblock_for_next_block(&fb0).build(); - manager.insert_flashblock(fb1.clone()).unwrap(); - - // Create third sequence for block 102 (this caches block 101) - let fb2 = factory.flashblock_for_next_block(&fb1).build(); - manager.insert_flashblock(fb2).unwrap(); - - // Local tip is some random hash (not matching any sequence parent) - let local_tip_hash = B256::random(); - - // Create pending parent state that matches the cached block 100 sequence's parent - let parent_hash = B256::random(); - let pending_state: PendingBlockState = PendingBlockState { - block_hash: block_99_hash, - block_number: 99, - parent_hash, - canonical_anchor_hash: parent_hash, - execution_outcome: Arc::new(BlockExecutionOutput::default()), - cached_reads: CachedReads::default(), - sealed_header: None, - }; - - // Should find cached sequence for block 100 (whose parent is block_99_hash) - let args = manager.next_buildable_args(local_tip_hash, 1000000, Some(pending_state)); - assert!(args.is_some()); - let build_args = args.unwrap(); - assert!(build_args.pending_parent.is_some()); - assert_eq!(build_args.base.block_number, 100); - } - - #[test] - fn test_canonical_build_takes_priority_over_speculative() { - use crate::types::pending_state::PendingBlockState; - use reth_execution_types::BlockExecutionOutput; - use reth_revm::cached::CachedReads; - use std::sync::Arc; - - let mut manager: SequenceManager = SequenceManager::new(true); - let factory = TestFlashBlockFactory::new(); - - // Create a flashblock for block 100 - let fb0 = factory.flashblock_at(0).build(); - let parent_hash = fb0.base.as_ref().unwrap().parent_hash; - manager.insert_flashblock(fb0).unwrap(); - - // Create pending parent state with a different block hash - let pending_parent_hash = B256::random(); - let pending_state: PendingBlockState = PendingBlockState { - block_hash: B256::repeat_byte(0xAA), - block_number: 99, - parent_hash: pending_parent_hash, - canonical_anchor_hash: pending_parent_hash, - execution_outcome: Arc::new(BlockExecutionOutput::default()), - cached_reads: CachedReads::default(), - sealed_header: None, - }; - - // Local tip matches the sequence parent (canonical mode should take priority) - let args = manager.next_buildable_args(parent_hash, 1000000, Some(pending_state)); - assert!(args.is_some()); - let build_args = args.unwrap(); - // Should be canonical build (no pending_parent) - assert!(build_args.pending_parent.is_none()); - } - - // ==================== Reconciliation Cache Clearing Tests ==================== - - #[test] - fn test_catchup_clears_all_cached_sequences() { - let mut manager: SequenceManager = SequenceManager::new(true); - let factory = TestFlashBlockFactory::new(); - - // Build up cached sequences for blocks 100, 101, 102 - let fb0 = factory.flashblock_at(0).build(); - manager.insert_flashblock(fb0.clone()).unwrap(); - - let fb1 = factory.flashblock_for_next_block(&fb0).build(); - manager.insert_flashblock(fb1.clone()).unwrap(); - - let fb2 = factory.flashblock_for_next_block(&fb1).build(); - manager.insert_flashblock(fb2).unwrap(); - - // Verify we have cached sequences - assert_eq!(manager.completed_cache.len(), 2); - assert!(manager.pending().block_number().is_some()); - - // Canonical catches up to 102 - should clear everything - let canonical = canonical_for(&manager, 102, vec![]); - let strategy = manager.process_canonical_block(canonical, 10); - assert_eq!(strategy, ReconciliationStrategy::CatchUp); - - // Verify all state is cleared - assert!(manager.pending().block_number().is_none()); - assert_eq!(manager.completed_cache.len(), 0); - } - - #[test] - fn test_reorg_clears_all_cached_sequences() { - let mut manager: SequenceManager = SequenceManager::new(true); - let factory = TestFlashBlockFactory::new(); - - // Build pending sequence for block 100 - let fb0 = factory.flashblock_at(0).build(); - manager.insert_flashblock(fb0.clone()).unwrap(); - - // Add another sequence - let fb1 = factory.flashblock_for_next_block(&fb0).build(); - manager.insert_flashblock(fb1).unwrap(); - - // Verify we have state - assert!(manager.pending().block_number().is_some()); - assert!(!manager.completed_cache.is_empty()); - - // Simulate reorg at block 100: canonical has different tx than our cached - // We need to insert a tx in the sequence to make reorg detection work - // The reorg detection compares our pending transactions vs canonical - // Since we have no pending transactions (TestFlashBlockFactory creates empty tx lists), - // we need to use a different approach - process with tx hashes that don't match empty - - // Actually, let's verify the state clearing on HandleReorg by checking - // that any non-empty canonical_tx_hashes when we have state triggers reorg - let canonical_tx_hashes = vec![B256::repeat_byte(0xAA)]; - let canonical = canonical_for(&manager, 100, canonical_tx_hashes); - let strategy = manager.process_canonical_block(canonical, 10); - - // Should detect reorg (canonical has txs, we have none for that block) - assert_eq!(strategy, ReconciliationStrategy::HandleReorg); - - // Verify all state is cleared - assert!(manager.pending().block_number().is_none()); - assert_eq!(manager.completed_cache.len(), 0); - } - - #[test] - fn test_depth_limit_exceeded_clears_all_state() { - let mut manager: SequenceManager = SequenceManager::new(true); - let factory = TestFlashBlockFactory::new(); - - // Build sequences for blocks 100-102 - let fb0 = factory.flashblock_at(0).build(); - manager.insert_flashblock(fb0.clone()).unwrap(); - - let fb1 = factory.flashblock_for_next_block(&fb0).build(); - manager.insert_flashblock(fb1.clone()).unwrap(); - - let fb2 = factory.flashblock_for_next_block(&fb1).build(); - manager.insert_flashblock(fb2).unwrap(); - - // Verify state exists - assert_eq!(manager.earliest_block_number(), Some(100)); - assert_eq!(manager.latest_block_number(), Some(102)); - - // Canonical at 101 with max_depth of 0 (depth = 101 - 100 = 1 > 0) - // Since canonical < latest (102), this should trigger depth limit exceeded - let canonical = canonical_for(&manager, 101, vec![]); - let strategy = manager.process_canonical_block(canonical, 0); - assert!(matches!(strategy, ReconciliationStrategy::DepthLimitExceeded { .. })); - - // Verify all state is cleared - assert!(manager.pending().block_number().is_none()); - assert_eq!(manager.completed_cache.len(), 0); - } - - #[test] - fn test_continue_preserves_all_state() { - let mut manager: SequenceManager = SequenceManager::new(true); - let factory = TestFlashBlockFactory::new(); - - // Build sequences for blocks 100-102 - let fb0 = factory.flashblock_at(0).build(); - manager.insert_flashblock(fb0.clone()).unwrap(); - - let fb1 = factory.flashblock_for_next_block(&fb0).build(); - manager.insert_flashblock(fb1.clone()).unwrap(); - - let fb2 = factory.flashblock_for_next_block(&fb1).build(); - manager.insert_flashblock(fb2).unwrap(); - - let cached_count = manager.completed_cache.len(); - - // Canonical at 99 (behind pending) with reasonable depth limit - let canonical = canonical_for(&manager, 99, vec![]); - let strategy = manager.process_canonical_block(canonical, 10); - assert_eq!(strategy, ReconciliationStrategy::Continue); - - // Verify state is preserved - assert_eq!(manager.pending().block_number(), Some(102)); - assert_eq!(manager.completed_cache.len(), cached_count); - } - - #[test] - fn test_clear_all_removes_pending_and_cache() { - let mut manager: SequenceManager = SequenceManager::new(true); - let factory = TestFlashBlockFactory::new(); - - // Build up state - let fb0 = factory.flashblock_at(0).build(); - manager.insert_flashblock(fb0.clone()).unwrap(); - - let fb1 = factory.flashblock_for_next_block(&fb0).build(); - manager.insert_flashblock(fb1).unwrap(); - - // Verify state exists - assert!(manager.pending().block_number().is_some()); - assert!(!manager.completed_cache.is_empty()); - assert!(manager.pending_transaction_count() > 0 || manager.pending().count() > 0); - - // Clear via catchup - let canonical = canonical_for(&manager, 101, vec![]); - manager.process_canonical_block(canonical, 10); - - // Verify complete clearing - assert!(manager.pending().block_number().is_none()); - assert_eq!(manager.pending().count(), 0); - assert!(manager.completed_cache.is_empty()); - assert_eq!(manager.pending_transaction_count(), 0); - } - - // ==================== Tracked Fingerprint Tests ==================== - - #[test] - fn test_tracked_fingerprint_returns_none_for_unknown_block() { - let manager: SequenceManager = SequenceManager::new(true); - - // No flashblocks inserted, should return none - let fingerprint = manager.tracked_fingerprint_for_block(100); - assert!(fingerprint.is_none()); - } - - #[test] - fn test_no_false_reorg_for_untracked_block() { - let mut manager: SequenceManager = SequenceManager::new(true); - let factory = TestFlashBlockFactory::new(); - - // Build pending sequence for block 100 - let fb0 = factory.flashblock_at(0).build(); - manager.insert_flashblock(fb0.clone()).unwrap(); - - // Add another sequence for block 101 - let fb1 = factory.flashblock_for_next_block(&fb0).build(); - manager.insert_flashblock(fb1).unwrap(); - - // Verify we have state for blocks 100 (cached) and 101 (pending) - assert_eq!(manager.earliest_block_number(), Some(100)); - assert_eq!(manager.latest_block_number(), Some(101)); - - // Process canonical block 99 (not tracked) with transactions - // This should NOT trigger reorg detection because we don't track block 99 - let canonical_tx_hashes = vec![B256::repeat_byte(0xAA)]; - let canonical = canonical_for(&manager, 99, canonical_tx_hashes); - let strategy = manager.process_canonical_block(canonical, 10); - - // Should continue (not reorg) because block 99 is outside our tracked window - assert_eq!(strategy, ReconciliationStrategy::Continue); - - // State should be preserved - assert_eq!(manager.pending().block_number(), Some(101)); - assert!(!manager.completed_cache.is_empty()); - } - - #[test] - fn test_reorg_detected_for_tracked_block_with_different_txs() { - let mut manager: SequenceManager = SequenceManager::new(true); - let factory = TestFlashBlockFactory::new(); - - // Build pending sequence for block 100 - let fb0 = factory.flashblock_at(0).build(); - manager.insert_flashblock(fb0.clone()).unwrap(); - - // Add another sequence for block 101 - let fb1 = factory.flashblock_for_next_block(&fb0).build(); - manager.insert_flashblock(fb1).unwrap(); - - // Process canonical block 100 (which IS tracked) with different transactions - // Our tracked block 100 has empty tx list, canonical has non-empty - let canonical_tx_hashes = vec![B256::repeat_byte(0xAA)]; - let canonical = canonical_for(&manager, 100, canonical_tx_hashes); - let strategy = manager.process_canonical_block(canonical, 10); - - // Should detect reorg because we track block 100 and txs don't match - assert_eq!(strategy, ReconciliationStrategy::HandleReorg); - - // State should be cleared - assert!(manager.pending().block_number().is_none()); - assert!(manager.completed_cache.is_empty()); - } - - #[test] - fn test_reorg_detected_for_tracked_block_with_parent_hash_mismatch() { - let mut manager: SequenceManager = SequenceManager::new(true); - let factory = TestFlashBlockFactory::new(); - - // Build pending sequence for block 100 and cache it by starting block 101. - let fb0 = factory.flashblock_at(0).build(); - manager.insert_flashblock(fb0.clone()).unwrap(); - let fb1 = factory.flashblock_for_next_block(&fb0).build(); - manager.insert_flashblock(fb1).unwrap(); - - let tracked = manager - .tracked_fingerprint_for_block(100) - .expect("tracked fingerprint for block 100 should exist"); - let canonical = CanonicalBlockFingerprint { - block_number: 100, - block_hash: tracked.block_hash, - parent_hash: B256::repeat_byte(0xAA), // Different parent hash, identical txs. - tx_hashes: tracked.tx_hashes, - }; - - let strategy = manager.process_canonical_block(canonical, 10); - assert_eq!(strategy, ReconciliationStrategy::HandleReorg); - assert!(manager.pending().block_number().is_none()); - assert!(manager.completed_cache.is_empty()); - } - - #[test] - fn test_reorg_detected_for_tracked_block_with_block_hash_mismatch() { - let mut manager: SequenceManager = SequenceManager::new(true); - let factory = TestFlashBlockFactory::new(); - - // Build pending sequence for block 100 and cache it by starting block 101. - let fb0 = factory.flashblock_at(0).build(); - manager.insert_flashblock(fb0.clone()).unwrap(); - let fb1 = factory.flashblock_for_next_block(&fb0).build(); - manager.insert_flashblock(fb1).unwrap(); - - let tracked = manager - .tracked_fingerprint_for_block(100) - .expect("tracked fingerprint for block 100 should exist"); - let canonical = CanonicalBlockFingerprint { - block_number: 100, - block_hash: B256::repeat_byte(0xBB), // Different block hash, identical parent+txs. - parent_hash: tracked.parent_hash, - tx_hashes: tracked.tx_hashes, - }; - - let strategy = manager.process_canonical_block(canonical, 10); - assert_eq!(strategy, ReconciliationStrategy::HandleReorg); - assert!(manager.pending().block_number().is_none()); - assert!(manager.completed_cache.is_empty()); - } - - #[test] - fn test_tracked_fingerprint_for_pending_block() { - let mut manager: SequenceManager = SequenceManager::new(true); - let factory = TestFlashBlockFactory::new(); - - // Create flashblock without transactions (empty tx list is valid) - let fb0 = factory.flashblock_at(0).build(); - manager.insert_flashblock(fb0).unwrap(); - - // Should find tracked fingerprint for block 100 - let fingerprint = manager.tracked_fingerprint_for_block(100); - assert!(fingerprint.is_some()); - assert!(fingerprint.unwrap().tx_hashes.is_empty()); // No transactions in this flashblock - } - - #[test] - fn test_tracked_fingerprint_for_cached_block() { - let mut manager: SequenceManager = SequenceManager::new(true); - let factory = TestFlashBlockFactory::new(); - - // Create first flashblock for block 100 - let fb0 = factory.flashblock_at(0).build(); - manager.insert_flashblock(fb0.clone()).unwrap(); - - // Create second flashblock for block 101 (caches block 100) - let fb1 = factory.flashblock_for_next_block(&fb0).build(); - manager.insert_flashblock(fb1).unwrap(); - - // Should find tracked fingerprint for cached block 100 - let fingerprint = manager.tracked_fingerprint_for_block(100); - assert!(fingerprint.is_some()); - assert!(fingerprint.as_ref().unwrap().tx_hashes.is_empty()); - - // Should find tracked fingerprint for pending block 101 - let fingerprint = manager.tracked_fingerprint_for_block(101); - assert!(fingerprint.is_some()); - assert!(fingerprint.as_ref().unwrap().tx_hashes.is_empty()); - } -} From 6666f70cce1a7913ef7eb2962a054f6e977e6328 Mon Sep 17 00:00:00 2001 From: Niven Date: Fri, 13 Mar 2026 18:19:35 +0800 Subject: [PATCH 24/76] Fix merge errors --- Cargo.lock | 8 +-- Cargo.toml | 154 ++++++++++++++++++++++++++++++++--------------------- 2 files changed, 96 insertions(+), 66 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 8af6de58..75657ccf 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -12962,9 +12962,9 @@ dependencies = [ [[package]] name = "unicode-width" -version = "0.2.2" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b4ac048d71ede7ee76d585517add45da530660ef4390e49b098733c6e897f254" +checksum = "1fc81956842c57dac11422a97c3b8195a1ff727f06e85c84ed2e8aa277c9a0fd" [[package]] name = "unicode-xid" @@ -14225,10 +14225,10 @@ dependencies = [ "serde", "serde_json", "test-case", - "thiserror 1.0.69", + "thiserror 2.0.18", "tokio", "tokio-stream", - "tokio-tungstenite", + "tokio-tungstenite 0.26.2", "tokio-util", "tracing", "url", diff --git a/Cargo.toml b/Cargo.toml index 8ad8737c..ea693877 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -80,62 +80,71 @@ xlayer-trace-monitor = { git = "https://github.com/okx/xlayer-toolkit", rev = "d # These are chain-agnostic crates from the base reth framework. # https://github.com/paradigmxyz/reth/tree/v1.11.0 # ============================================================================== -reth = { git = "https://github.com/okx/reth", rev = "b6a31f31af91abdecb475f2a991906bff9bbef7f" } -reth-basic-payload-builder = { git = "https://github.com/okx/reth", rev = "b6a31f31af91abdecb475f2a991906bff9bbef7f" } -reth-chainspec = { git = "https://github.com/okx/reth", rev = "b6a31f31af91abdecb475f2a991906bff9bbef7f" } -reth-cli = { git = "https://github.com/okx/reth", rev = "b6a31f31af91abdecb475f2a991906bff9bbef7f" } -reth-cli-commands = { git = "https://github.com/okx/reth", rev = "b6a31f31af91abdecb475f2a991906bff9bbef7f" } -reth-cli-util = { git = "https://github.com/okx/reth", rev = "b6a31f31af91abdecb475f2a991906bff9bbef7f" } -reth-db = { git = "https://github.com/okx/reth", rev = "b6a31f31af91abdecb475f2a991906bff9bbef7f" } -reth-db-api = { git = "https://github.com/okx/reth", rev = "b6a31f31af91abdecb475f2a991906bff9bbef7f" } -reth-db-models = { git = "https://github.com/okx/reth", rev = "b6a31f31af91abdecb475f2a991906bff9bbef7f" } -reth-engine-primitives = { git = "https://github.com/okx/reth", rev = "b6a31f31af91abdecb475f2a991906bff9bbef7f" } -reth-ethereum-forks = { git = "https://github.com/okx/reth", rev = "b6a31f31af91abdecb475f2a991906bff9bbef7f" } -reth-evm = { git = "https://github.com/okx/reth", rev = "b6a31f31af91abdecb475f2a991906bff9bbef7f" } -reth-execution-types = { git = "https://github.com/okx/reth", rev = "b6a31f31af91abdecb475f2a991906bff9bbef7f" } -reth-ipc = { git = "https://github.com/okx/reth", rev = "b6a31f31af91abdecb475f2a991906bff9bbef7f" } -reth-metrics = { git = "https://github.com/okx/reth", rev = "b6a31f31af91abdecb475f2a991906bff9bbef7f" } -reth-node-api = { git = "https://github.com/okx/reth", rev = "b6a31f31af91abdecb475f2a991906bff9bbef7f" } -reth-node-builder = { git = "https://github.com/okx/reth", rev = "b6a31f31af91abdecb475f2a991906bff9bbef7f" } -reth-node-core = { git = "https://github.com/okx/reth", rev = "b6a31f31af91abdecb475f2a991906bff9bbef7f" } -reth-node-types = { git = "https://github.com/okx/reth", rev = "b6a31f31af91abdecb475f2a991906bff9bbef7f" } -reth-optimism-chainspec = { git = "https://github.com/okx/reth", rev = "b6a31f31af91abdecb475f2a991906bff9bbef7f" } -reth-optimism-cli = { git = "https://github.com/okx/reth", rev = "b6a31f31af91abdecb475f2a991906bff9bbef7f" } -reth-optimism-consensus = { git = "https://github.com/okx/reth", rev = "b6a31f31af91abdecb475f2a991906bff9bbef7f" } -reth-optimism-evm = { git = "https://github.com/okx/reth", rev = "b6a31f31af91abdecb475f2a991906bff9bbef7f" } -reth-optimism-forks = { git = "https://github.com/okx/reth", rev = "b6a31f31af91abdecb475f2a991906bff9bbef7f" } -reth-optimism-node = { git = "https://github.com/okx/reth", rev = "b6a31f31af91abdecb475f2a991906bff9bbef7f" } -reth-optimism-payload-builder = { git = "https://github.com/okx/reth", rev = "b6a31f31af91abdecb475f2a991906bff9bbef7f" } -reth-optimism-primitives = { git = "https://github.com/okx/reth", rev = "b6a31f31af91abdecb475f2a991906bff9bbef7f" } -reth-optimism-rpc = { git = "https://github.com/okx/reth", rev = "b6a31f31af91abdecb475f2a991906bff9bbef7f" } -reth-optimism-txpool = { git = "https://github.com/okx/reth", rev = "b6a31f31af91abdecb475f2a991906bff9bbef7f" } -reth-payload-builder = { git = "https://github.com/okx/reth", rev = "b6a31f31af91abdecb475f2a991906bff9bbef7f" } -reth-payload-builder-primitives = { git = "https://github.com/okx/reth", rev = "b6a31f31af91abdecb475f2a991906bff9bbef7f" } -reth-payload-primitives = { git = "https://github.com/okx/reth", rev = "b6a31f31af91abdecb475f2a991906bff9bbef7f" } -reth-payload-util = { git = "https://github.com/okx/reth", rev = "b6a31f31af91abdecb475f2a991906bff9bbef7f" } -reth-primitives = { git = "https://github.com/okx/reth", rev = "b6a31f31af91abdecb475f2a991906bff9bbef7f" } -reth-primitives-traits = { git = "https://github.com/okx/reth", rev = "b6a31f31af91abdecb475f2a991906bff9bbef7f" } -reth-provider = { git = "https://github.com/okx/reth", rev = "b6a31f31af91abdecb475f2a991906bff9bbef7f" } -reth-revm = { git = "https://github.com/okx/reth", rev = "b6a31f31af91abdecb475f2a991906bff9bbef7f" } -reth-rpc = { git = "https://github.com/okx/reth", rev = "b6a31f31af91abdecb475f2a991906bff9bbef7f" } -reth-rpc-api = { git = "https://github.com/okx/reth", rev = "b6a31f31af91abdecb475f2a991906bff9bbef7f" } -reth-rpc-convert = { git = "https://github.com/okx/reth", rev = "b6a31f31af91abdecb475f2a991906bff9bbef7f" } -reth-rpc-engine-api = { git = "https://github.com/okx/reth", rev = "b6a31f31af91abdecb475f2a991906bff9bbef7f" } -reth-rpc-eth-api = { git = "https://github.com/okx/reth", rev = "b6a31f31af91abdecb475f2a991906bff9bbef7f" } -reth-rpc-eth-types = { git = "https://github.com/okx/reth", rev = "b6a31f31af91abdecb475f2a991906bff9bbef7f" } -reth-rpc-layer = { git = "https://github.com/okx/reth", rev = "b6a31f31af91abdecb475f2a991906bff9bbef7f" } -reth-rpc-server-types = { git = "https://github.com/okx/reth", rev = "b6a31f31af91abdecb475f2a991906bff9bbef7f" } -reth-storage-api = { git = "https://github.com/okx/reth", rev = "b6a31f31af91abdecb475f2a991906bff9bbef7f" } -reth-tasks = { git = "https://github.com/okx/reth", rev = "b6a31f31af91abdecb475f2a991906bff9bbef7f" } -reth-testing-utils = { git = "https://github.com/okx/reth", rev = "b6a31f31af91abdecb475f2a991906bff9bbef7f" } -reth-tokio-util = { git = "https://github.com/okx/reth", rev = "b6a31f31af91abdecb475f2a991906bff9bbef7f" } -reth-tracing = { git = "https://github.com/okx/reth", rev = "b6a31f31af91abdecb475f2a991906bff9bbef7f" } -reth-transaction-pool = { git = "https://github.com/okx/reth", rev = "b6a31f31af91abdecb475f2a991906bff9bbef7f" } -reth-chain-state = { git = "https://github.com/okx/reth", rev = "b6a31f31af91abdecb475f2a991906bff9bbef7f" } -reth-errors = { git = "https://github.com/okx/reth", rev = "b6a31f31af91abdecb475f2a991906bff9bbef7f" } -reth-optimism-flashblocks = { git = "https://github.com/okx/reth", rev = "b6a31f31af91abdecb475f2a991906bff9bbef7f" } -reth-optimism-storage = { git = "https://github.com/okx/reth", rev = "b6a31f31af91abdecb475f2a991906bff9bbef7f" } -reth-trie = { git = "https://github.com/okx/reth", rev = "b6a31f31af91abdecb475f2a991906bff9bbef7f" } +reth = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.0" } +reth-basic-payload-builder = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.0" } +reth-chain-state = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.0" } +reth-chainspec = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.0", default-features = false } +reth-cli = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.0" } +reth-cli-commands = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.0" } +reth-cli-util = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.0" } +reth-db = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.0", default-features = false } +reth-db-api = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.0", default-features = false } +reth-db-models = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.0", default-features = false } +reth-engine-primitives = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.0", default-features = false } +reth-errors = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.0" } +reth-ethereum-forks = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.0", default-features = false } +reth-evm = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.0", default-features = false } +reth-exex = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.0" } +reth-execution-types = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.0", default-features = false } +reth-ipc = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.0" } +reth-metrics = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.0" } +reth-network-peers = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.0", default-features = false } +reth-node-api = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.0" } +reth-node-builder = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.0" } +reth-node-core = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.0" } +reth-node-types = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.0" } +reth-payload-builder = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.0" } +reth-payload-builder-primitives = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.0" } +reth-payload-primitives = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.0" } +reth-payload-util = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.0" } +reth-primitives = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.0" } +reth-primitives-traits = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.0", default-features = false } +reth-provider = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.0" } +reth-revm = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.0", default-features = false } +reth-rpc = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.0" } +reth-rpc-api = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.0" } +reth-rpc-convert = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.0" } +reth-rpc-engine-api = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.0" } +reth-rpc-eth-api = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.0" } +reth-rpc-eth-types = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.0", default-features = false } +reth-rpc-layer = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.0" } +reth-rpc-server-types = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.0" } +reth-storage-api = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.0", default-features = false } +reth-tasks = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.0" } +reth-testing-utils = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.0" } +reth-tokio-util = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.0" } +reth-tracing = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.0", default-features = false } +reth-tracing-otlp = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.0" } +reth-transaction-pool = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.0" } +reth-trie = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.0" } +reth-trie-common = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.0", default-features = false } + +# ============================================================================== +# Reth Optimism Dependencies (from local optimism/rust op-reth) +# These are OP-chain-specific crates, sourced from the optimism monorepo. +# ============================================================================== +reth-optimism-chainspec = { path = "deps/optimism/rust/op-reth/crates/chainspec/", default-features = false } +reth-optimism-cli = { path = "deps/optimism/rust/op-reth/crates/cli/", default-features = false } +reth-optimism-consensus = { path = "deps/optimism/rust/op-reth/crates/consensus/", default-features = false } +reth-optimism-evm = { path = "deps/optimism/rust/op-reth/crates/evm/", default-features = false } +reth-optimism-flashblocks = { path = "deps/optimism/rust/op-reth/crates/flashblocks/" } +reth-optimism-forks = { path = "deps/optimism/rust/op-reth/crates/hardforks/", default-features = false } +reth-optimism-node = { path = "deps/optimism/rust/op-reth/crates/node/" } +reth-optimism-payload-builder = { path = "deps/optimism/rust/op-reth/crates/payload/" } +reth-optimism-primitives = { path = "deps/optimism/rust/op-reth/crates/primitives/", default-features = false } +reth-optimism-rpc = { path = "deps/optimism/rust/op-reth/crates/rpc/" } +reth-optimism-storage = { path = "deps/optimism/rust/op-reth/crates/storage/" } +reth-optimism-txpool = { path = "deps/optimism/rust/op-reth/crates/txpool/" } # ============================================================================== # Revm Dependencies (follows upstream reth) @@ -180,13 +189,14 @@ alloy-transport-http = { version = "~1.6", features = [ alloy-transport = { version = "~1.6" } alloy-hardforks = { version = "~0.4", default-features = false } -# op-alloy -alloy-op-evm = { version = "0.26.3", default-features = false } -op-alloy-consensus = { version = "0.23.1", default-features = false } +# op-alloy (from local optimism/rust, with version for compat) +alloy-op-evm = { version = "0.26.3", path = "deps/optimism/rust/alloy-op-evm/", default-features = false } +alloy-op-hardforks = { version = "0.4.7", path = "deps/optimism/rust/alloy-op-hardforks/", default-features = false } +op-alloy-consensus = { version = "0.23.1", path = "deps/optimism/rust/op-alloy/crates/consensus", default-features = false } op-alloy-flz = { version = "0.13.1", default-features = false } -op-alloy-network = { version = "0.23.1", default-features = false } -op-alloy-rpc-types = { version = "0.23.1", default-features = false } -op-alloy-rpc-types-engine = { version = "0.23.1", default-features = false } +op-alloy-network = { version = "0.23.1", path = "deps/optimism/rust/op-alloy/crates/network", default-features = false } +op-alloy-rpc-types = { version = "0.23.1", path = "deps/optimism/rust/op-alloy/crates/rpc-types", default-features = false } +op-alloy-rpc-types-engine = { version = "0.23.1", path = "deps/optimism/rust/op-alloy/crates/rpc-types-engine", default-features = false } # ============================================================================== # Support Dependencies @@ -240,3 +250,23 @@ multiaddr = "0.18" # ============================================================================== # Patch Section +# Required because paradigm reth v1.11.0 (git) transitively depends on +# op-alloy crates from crates.io. We redirect them to the local optimism +# workspace versions to avoid duplicate/conflicting types. +# +# Duplicated by (from reth git): +# op-alloy-consensus: reth-codecs, reth-db-api, reth-primitives-traits, reth-rpc-convert +# op-alloy-rpc-types-engine: reth-payload-primitives, reth-engine-local +# op-alloy-network: reth-rpc-convert +# op-alloy-rpc-types: reth-rpc-convert +# Duplicated by (from crates.io): +# alloy-op-hardforks: alloy-evm +# ============================================================================== +[patch.crates-io] +op-alloy-consensus = { path = "deps/optimism/rust/op-alloy/crates/consensus" } +op-alloy-rpc-types-engine = { path = "deps/optimism/rust/op-alloy/crates/rpc-types-engine" } +op-alloy-network = { path = "deps/optimism/rust/op-alloy/crates/network" } +op-alloy-rpc-types = { path = "deps/optimism/rust/op-alloy/crates/rpc-types" } +alloy-op-hardforks = { path = "deps/optimism/rust/alloy-op-hardforks/" } +alloy-op-evm = { path = "deps/optimism/rust/alloy-op-evm/" } +op-alloy = { path = "deps/optimism/rust/op-alloy/crates/op-alloy" } From 634be38572a2d022b5c0caeb53a750387ff79666 Mon Sep 17 00:00:00 2001 From: Niven Date: Fri, 13 Mar 2026 19:41:47 +0800 Subject: [PATCH 25/76] fix(flashblocks-rpc): use spawn_critical_task for service handles MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude Opus 4.6 --- crates/flashblocks/src/service.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/crates/flashblocks/src/service.rs b/crates/flashblocks/src/service.rs index 35aa75d5..496ba274 100644 --- a/crates/flashblocks/src/service.rs +++ b/crates/flashblocks/src/service.rs @@ -82,7 +82,7 @@ where // Spawn persistence handle let datadir = self.datadir.clone(); let rx = self.subscribe_received_flashblocks(); - self.task_executor.spawn_critical( + self.task_executor.spawn_critical_task( "xlayer-flashblocks-persistence", Box::pin(async move { handle_persistence(rx, datadir).await; @@ -93,7 +93,7 @@ where if self.relay_flashblocks { let rx = self.subscribe_received_flashblocks(); let ws_pub = self.ws_pub.clone(); - self.task_executor.spawn_critical( + self.task_executor.spawn_critical_task( "xlayer-flashblocks-publish", Box::pin(async move { handle_relay_flashblocks(rx, ws_pub).await; From 391da2ca03bb3515c025528cd2c7a9927995eee8 Mon Sep 17 00:00:00 2001 From: Niven Date: Fri, 13 Mar 2026 20:50:53 +0800 Subject: [PATCH 26/76] test(flashblocks-rpc): add unit test coverages for caches MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude Opus 4.6 --- crates/flashblocks/src/cache/confirm.rs | 481 +++++++++++++++++++ crates/flashblocks/src/cache/pending.rs | 320 +++++-------- crates/flashblocks/src/cache/raw.rs | 604 ++++++++++++++++++++++++ crates/flashblocks/src/cache/utils.rs | 43 ++ 4 files changed, 1256 insertions(+), 192 deletions(-) diff --git a/crates/flashblocks/src/cache/confirm.rs b/crates/flashblocks/src/cache/confirm.rs index fbdba3c9..1045088d 100644 --- a/crates/flashblocks/src/cache/confirm.rs +++ b/crates/flashblocks/src/cache/confirm.rs @@ -227,3 +227,484 @@ impl ConfirmCache { count } } + +#[cfg(test)] +mod tests { + use super::*; + use alloy_consensus::{BlockHeader, Header, Receipt, TxEip7702}; + use alloy_primitives::{Address, Bytes, PrimitiveSignature, B256, U256}; + use op_alloy_consensus::OpTypedTransaction; + use reth_chain_state::{ComputedTrieData, ExecutedBlock}; + use reth_execution_types::{BlockExecutionOutput, BlockExecutionResult}; + use reth_optimism_primitives::{ + OpBlock, OpBlockBody, OpPrimitives, OpReceipt, OpTransactionSigned, + }; + use reth_primitives_traits::{RecoveredBlock, SealedBlock, SealedHeader}; + use std::sync::Arc; + + type TestConfirmCache = ConfirmCache; + + fn make_executed_block(block_number: u64, parent_hash: B256) -> ExecutedBlock { + let header = Header { number: block_number, parent_hash, ..Default::default() }; + let sealed_header = SealedHeader::seal_slow(header); + let block = OpBlock::new(sealed_header.unseal(), Default::default()); + let sealed_block = SealedBlock::seal_slow(block); + let recovered_block = RecoveredBlock::new_sealed(sealed_block, vec![]); + let execution_output = Arc::new(BlockExecutionOutput { + result: BlockExecutionResult { + receipts: vec![], + requests: Default::default(), + gas_used: 0, + blob_gas_used: 0, + }, + state: Default::default(), + }); + ExecutedBlock::new(Arc::new(recovered_block), execution_output, ComputedTrieData::default()) + } + + fn empty_receipts() -> Arc> { + Arc::new(vec![]) + } + + fn mock_tx(nonce: u64) -> OpTransactionSigned { + let tx = TxEip7702 { + chain_id: 1u64, + nonce, + max_fee_per_gas: 0x28f000fff, + max_priority_fee_per_gas: 0x28f000fff, + gas_limit: 21_000, + to: Address::default(), + value: U256::ZERO, + input: Bytes::new(), + access_list: Default::default(), + authorization_list: Default::default(), + }; + let signature = PrimitiveSignature::new(U256::default(), U256::default(), true); + OpTransactionSigned::new_unhashed(OpTypedTransaction::Eip7702(tx), signature) + } + + fn make_executed_block_with_txs( + block_number: u64, + parent_hash: B256, + nonce_start: u64, + count: usize, + ) -> (ExecutedBlock, Arc>) { + let txs: Vec = + (0..count).map(|i| mock_tx(nonce_start + i as u64)).collect(); + let senders: Vec

= (0..count).map(|_| Address::default()).collect(); + let receipts: Vec = (0..count) + .map(|i| { + OpReceipt::Eip7702(Receipt { + status: true.into(), + cumulative_gas_used: 21_000 * (i as u64 + 1), + logs: vec![], + }) + }) + .collect(); + + let header = Header { number: block_number, parent_hash, ..Default::default() }; + let sealed_header = SealedHeader::seal_slow(header); + let body = OpBlockBody { transactions: txs, ..Default::default() }; + let block = OpBlock::new(sealed_header.unseal(), body); + let sealed_block = SealedBlock::seal_slow(block); + let recovered_block = RecoveredBlock::new_sealed(sealed_block, senders); + let execution_output = Arc::new(BlockExecutionOutput { + result: BlockExecutionResult { + receipts: receipts.clone(), + requests: Default::default(), + gas_used: 21_000 * count as u64, + blob_gas_used: 0, + }, + state: Default::default(), + }); + let executed = ExecutedBlock::new( + Arc::new(recovered_block), + execution_output, + ComputedTrieData::default(), + ); + (executed, Arc::new(receipts)) + } + + #[test] + fn test_confirm_cache_new_is_empty() { + let cache = TestConfirmCache::new(); + assert!(cache.is_empty()); + assert_eq!(cache.len(), 0); + } + + #[test] + fn test_confirm_cache_insert_single_block_increases_len() { + let mut cache = TestConfirmCache::new(); + let block = make_executed_block(1, B256::ZERO); + cache.insert(1, block, empty_receipts()).expect("insert should succeed"); + assert_eq!(cache.len(), 1); + assert!(!cache.is_empty()); + } + + #[test] + fn test_confirm_cache_insert_fails_at_max_capacity() { + let mut cache = TestConfirmCache::new(); + let mut parent = B256::ZERO; + for height in 1..=(DEFAULT_CONFIRM_BLOCK_CACHE_SIZE as u64) { + let block = make_executed_block(height, parent); + let hash = block.recovered_block.hash(); + cache.insert(height, block, empty_receipts()).expect("insert within capacity"); + parent = hash; + } + let overflow = make_executed_block(DEFAULT_CONFIRM_BLOCK_CACHE_SIZE as u64 + 1, parent); + let result = + cache.insert(DEFAULT_CONFIRM_BLOCK_CACHE_SIZE as u64 + 1, overflow, empty_receipts()); + assert!(result.is_err()); + } + + #[test] + fn test_confirm_cache_get_block_by_number_returns_correct_block() { + let mut cache = TestConfirmCache::new(); + let block = make_executed_block(42, B256::ZERO); + cache.insert(42, block, empty_receipts()).expect("insert"); + let result = cache.get_block_by_number(42); + assert!(result.is_some()); + assert_eq!(result.unwrap().block.number(), 42); + } + + #[test] + fn test_confirm_cache_get_block_by_number_returns_none_for_wrong_number() { + let mut cache = TestConfirmCache::new(); + let block = make_executed_block(42, B256::ZERO); + cache.insert(42, block, empty_receipts()).expect("insert"); + assert!(cache.get_block_by_number(43).is_none()); + assert!(cache.get_block_by_number(0).is_none()); + } + + #[test] + fn test_confirm_cache_get_block_by_hash_returns_correct_block() { + let mut cache = TestConfirmCache::new(); + let block = make_executed_block(42, B256::ZERO); + let block_hash = block.recovered_block.hash(); + cache.insert(42, block, empty_receipts()).expect("insert"); + let result = cache.get_block_by_hash(&block_hash); + assert!(result.is_some()); + assert_eq!(result.unwrap().block.number(), 42); + } + + #[test] + fn test_confirm_cache_get_block_by_hash_returns_none_for_unknown_hash() { + let mut cache = TestConfirmCache::new(); + let block = make_executed_block(42, B256::ZERO); + cache.insert(42, block, empty_receipts()).expect("insert"); + assert!(cache.get_block_by_hash(&B256::repeat_byte(0xFF)).is_none()); + } + + #[test] + fn test_confirm_cache_number_for_hash_returns_correct_mapping() { + let mut cache = TestConfirmCache::new(); + let block = make_executed_block(10, B256::ZERO); + let hash = block.recovered_block.hash(); + cache.insert(10, block, empty_receipts()).expect("insert"); + assert_eq!(cache.number_for_hash(&hash), Some(10)); + } + + #[test] + fn test_confirm_cache_hash_for_number_returns_correct_mapping() { + let mut cache = TestConfirmCache::new(); + let block = make_executed_block(10, B256::ZERO); + let expected_hash = block.recovered_block.hash(); + cache.insert(10, block, empty_receipts()).expect("insert"); + assert_eq!(cache.hash_for_number(10), Some(expected_hash)); + } + + #[test] + fn test_confirm_cache_clear_removes_all_entries() { + let mut cache = TestConfirmCache::new(); + let block = make_executed_block(1, B256::ZERO); + cache.insert(1, block, empty_receipts()).expect("insert"); + cache.clear(); + assert!(cache.is_empty()); + assert!(cache.get_block_by_number(1).is_none()); + } + + #[test] + fn test_confirm_cache_flush_up_to_height_removes_entries_at_or_below_height() { + let mut cache = TestConfirmCache::new(); + let mut parent = B256::ZERO; + for height in 1..=5 { + let block = make_executed_block(height, parent); + parent = block.recovered_block.hash(); + cache.insert(height, block, empty_receipts()).expect("insert"); + } + let count = cache.flush_up_to_height(3); + assert_eq!(count, 3); + assert_eq!(cache.len(), 2); + assert!(cache.get_block_by_number(3).is_none()); + assert!(cache.get_block_by_number(4).is_some()); + assert!(cache.get_block_by_number(5).is_some()); + } + + #[test] + fn test_confirm_cache_flush_up_to_height_higher_than_all_removes_all() { + let mut cache = TestConfirmCache::new(); + let mut parent = B256::ZERO; + for height in 1..=3 { + let block = make_executed_block(height, parent); + parent = block.recovered_block.hash(); + cache.insert(height, block, empty_receipts()).expect("insert"); + } + let count = cache.flush_up_to_height(100); + assert_eq!(count, 3); + assert!(cache.is_empty()); + } + + #[test] + fn test_confirm_cache_flush_up_to_height_zero_removes_nothing() { + let mut cache = TestConfirmCache::new(); + let block = make_executed_block(1, B256::ZERO); + cache.insert(1, block, empty_receipts()).expect("insert"); + let count = cache.flush_up_to_height(0); + assert_eq!(count, 0); + assert_eq!(cache.len(), 1); + } + + #[test] + fn test_confirm_cache_flush_removes_hash_indices_for_all_flushed_blocks() { + let mut cache = TestConfirmCache::new(); + let mut parent = B256::ZERO; + let mut hashes = vec![]; + for height in 1..=3 { + let block = make_executed_block(height, parent); + let hash = block.recovered_block.hash(); + hashes.push(hash); + cache.insert(height, block, empty_receipts()).expect("insert"); + parent = hash; + } + cache.flush_up_to_height(2); + assert!(cache.number_for_hash(&hashes[0]).is_none()); + assert!(cache.number_for_hash(&hashes[1]).is_none()); + assert!(cache.number_for_hash(&hashes[2]).is_some()); + } + + #[test] + fn test_confirm_cache_remove_block_by_number_returns_block_and_cleans_indices() { + let mut cache = TestConfirmCache::new(); + let block = make_executed_block(5, B256::ZERO); + let block_hash = block.recovered_block.hash(); + cache.insert(5, block, empty_receipts()).expect("insert"); + let removed = cache.remove_block_by_number(5); + assert!(removed.is_some()); + assert_eq!(cache.len(), 0); + assert!(cache.get_block_by_number(5).is_none()); + assert!(cache.number_for_hash(&block_hash).is_none()); + } + + #[test] + fn test_confirm_cache_remove_block_by_hash_returns_block_and_cleans_indices() { + let mut cache = TestConfirmCache::new(); + let block = make_executed_block(7, B256::ZERO); + let block_hash = block.recovered_block.hash(); + cache.insert(7, block, empty_receipts()).expect("insert"); + let removed = cache.remove_block_by_hash(&block_hash); + assert!(removed.is_some()); + assert_eq!(cache.len(), 0); + assert!(cache.get_block_by_hash(&block_hash).is_none()); + assert!(cache.get_block_by_number(7).is_none()); + } + + #[test] + fn test_confirm_cache_get_executed_blocks_up_to_height_returns_contiguous_blocks_newest_first() + { + let mut cache = TestConfirmCache::new(); + let block2 = make_executed_block(2, B256::repeat_byte(0x01)); + let block3 = make_executed_block(3, block2.recovered_block.hash()); + let block4 = make_executed_block(4, block3.recovered_block.hash()); + cache.insert(2, block2, empty_receipts()).expect("insert 2"); + cache.insert(3, block3, empty_receipts()).expect("insert 3"); + cache.insert(4, block4, empty_receipts()).expect("insert 4"); + let blocks = cache.get_executed_blocks_up_to_height(4, 1).unwrap(); + assert_eq!(blocks.len(), 3); + assert_eq!(blocks[0].recovered_block.number(), 4); + assert_eq!(blocks[1].recovered_block.number(), 3); + assert_eq!(blocks[2].recovered_block.number(), 2); + } + + #[test] + fn test_confirm_cache_get_executed_blocks_up_to_height_returns_empty_on_empty_cache() { + let cache = TestConfirmCache::new(); + let result = cache.get_executed_blocks_up_to_height(5, 1); + assert!(result.unwrap().is_empty()); + } + + #[test] + fn test_confirm_cache_get_executed_blocks_detects_gap_between_canonical_and_overlay() { + let mut cache = TestConfirmCache::new(); + let block3 = make_executed_block(3, B256::ZERO); + cache.insert(3, block3, empty_receipts()).expect("insert 3"); + assert!(cache.get_executed_blocks_up_to_height(3, 1).is_err()); + } + + #[test] + fn test_confirm_cache_get_executed_blocks_detects_non_contiguous_overlay() { + let mut cache = TestConfirmCache::new(); + let block2 = make_executed_block(2, B256::repeat_byte(0x01)); + let block4 = make_executed_block(4, B256::repeat_byte(0x03)); + cache.insert(2, block2, empty_receipts()).expect("insert 2"); + cache.insert(4, block4, empty_receipts()).expect("insert 4"); + assert!(cache.get_executed_blocks_up_to_height(4, 1).is_err()); + } + + #[test] + fn test_confirm_cache_get_executed_blocks_allows_redundant_overlap_with_canonical() { + let mut cache = TestConfirmCache::new(); + let block2 = make_executed_block(2, B256::repeat_byte(0x01)); + let block3 = make_executed_block(3, block2.recovered_block.hash()); + cache.insert(2, block2, empty_receipts()).expect("insert 2"); + cache.insert(3, block3, empty_receipts()).expect("insert 3"); + let blocks = cache.get_executed_blocks_up_to_height(3, 2).unwrap(); + assert_eq!(blocks.len(), 2); + } + + #[test] + fn test_confirm_cache_get_executed_blocks_single_block_contiguous_with_canonical() { + let mut cache = TestConfirmCache::new(); + let block5 = make_executed_block(5, B256::repeat_byte(0x04)); + cache.insert(5, block5, empty_receipts()).expect("insert 5"); + let blocks = cache.get_executed_blocks_up_to_height(5, 4).unwrap(); + assert_eq!(blocks.len(), 1); + assert_eq!(blocks[0].recovered_block.number(), 5); + } + + #[test] + fn test_confirm_cache_get_executed_blocks_returns_subset_up_to_target() { + let mut cache = TestConfirmCache::new(); + let block2 = make_executed_block(2, B256::repeat_byte(0x01)); + let block3 = make_executed_block(3, block2.recovered_block.hash()); + let block4 = make_executed_block(4, block3.recovered_block.hash()); + let block5 = make_executed_block(5, block4.recovered_block.hash()); + cache.insert(2, block2, empty_receipts()).expect("insert 2"); + cache.insert(3, block3, empty_receipts()).expect("insert 3"); + cache.insert(4, block4, empty_receipts()).expect("insert 4"); + cache.insert(5, block5, empty_receipts()).expect("insert 5"); + let blocks = cache.get_executed_blocks_up_to_height(3, 1).unwrap(); + assert_eq!(blocks.len(), 2); + assert_eq!(blocks[0].recovered_block.number(), 3); + assert_eq!(blocks[1].recovered_block.number(), 2); + } + + #[test] + fn test_confirm_cache_insert_same_height_twice_keeps_cache_len_at_one() { + let mut cache = TestConfirmCache::new(); + let block_a = make_executed_block(10, B256::ZERO); + let block_b = make_executed_block(10, B256::repeat_byte(0xFF)); + cache.insert(10, block_a, empty_receipts()).expect("first insert"); + cache.insert(10, block_b, empty_receipts()).expect("second insert"); + assert_eq!(cache.len(), 1); + } + + #[test] + fn test_confirm_cache_get_tx_info_returns_none_for_unknown_hash() { + let cache = TestConfirmCache::new(); + assert!(cache.get_tx_info(&B256::repeat_byte(0xAA)).is_none()); + } + + #[test] + fn test_confirm_cache_insert_builds_tx_index_correctly() { + let mut cache = TestConfirmCache::new(); + let (block, receipts) = make_executed_block_with_txs(1, B256::ZERO, 0, 3); + let block_hash = block.recovered_block.hash(); + let tx_hashes: Vec<_> = + block.recovered_block.body().transactions().map(|tx| *tx.tx_hash()).collect(); + cache.insert(1, block, receipts).expect("insert"); + + for (i, tx_hash) in tx_hashes.iter().enumerate() { + let (info, bar) = cache.get_tx_info(tx_hash).expect("tx should be in tx_index"); + assert_eq!(info.block_number, 1); + assert_eq!(info.block_hash, block_hash); + assert_eq!(info.tx_index, i as u64); + assert_eq!(bar.block.number(), 1); + } + } + + #[test] + fn test_confirm_cache_flush_cleans_tx_index() { + let mut cache = TestConfirmCache::new(); + let (block, receipts) = make_executed_block_with_txs(1, B256::ZERO, 0, 2); + let tx_hashes: Vec<_> = + block.recovered_block.body().transactions().map(|tx| *tx.tx_hash()).collect(); + cache.insert(1, block, receipts).expect("insert"); + + cache.flush_up_to_height(1); + for tx_hash in &tx_hashes { + assert!(cache.get_tx_info(tx_hash).is_none()); + } + } + + #[test] + fn test_confirm_cache_remove_block_by_number_cleans_tx_index() { + let mut cache = TestConfirmCache::new(); + let (block, receipts) = make_executed_block_with_txs(5, B256::ZERO, 0, 2); + let tx_hashes: Vec<_> = + block.recovered_block.body().transactions().map(|tx| *tx.tx_hash()).collect(); + cache.insert(5, block, receipts).expect("insert"); + + cache.remove_block_by_number(5); + for tx_hash in &tx_hashes { + assert!(cache.get_tx_info(tx_hash).is_none()); + } + } + + #[test] + fn test_confirm_cache_insert_duplicate_height_leaks_stale_hash_index() { + let mut cache = TestConfirmCache::new(); + let block_a = make_executed_block(10, B256::ZERO); + let hash_a = block_a.recovered_block.hash(); + let block_b = make_executed_block(10, B256::repeat_byte(0xFF)); + let hash_b = block_b.recovered_block.hash(); + + cache.insert(10, block_a, empty_receipts()).expect("first insert"); + cache.insert(10, block_b, empty_receipts()).expect("second insert"); + + assert_eq!(cache.number_for_hash(&hash_b), Some(10)); + // Documents known limitation: BTreeMap::insert overwrites the value + // but doesn't clean the old hash_to_number entry. + assert_eq!( + cache.number_for_hash(&hash_a), + Some(10), + "stale hash_to_number entry remains (known limitation)" + ); + } + + #[test] + fn test_confirm_cache_flush_cleans_tx_index_for_partial_flush() { + let mut cache = TestConfirmCache::new(); + let (block1, receipts1) = make_executed_block_with_txs(1, B256::ZERO, 0, 2); + let tx_hashes_1: Vec<_> = + block1.recovered_block.body().transactions().map(|tx| *tx.tx_hash()).collect(); + let parent = block1.recovered_block.hash(); + cache.insert(1, block1, receipts1).expect("insert 1"); + + let (block2, receipts2) = make_executed_block_with_txs(2, parent, 100, 2); + let tx_hashes_2: Vec<_> = + block2.recovered_block.body().transactions().map(|tx| *tx.tx_hash()).collect(); + cache.insert(2, block2, receipts2).expect("insert 2"); + + cache.flush_up_to_height(1); + for tx_hash in &tx_hashes_1 { + assert!(cache.get_tx_info(tx_hash).is_none(), "block 1 tx should be gone"); + } + for tx_hash in &tx_hashes_2 { + assert!(cache.get_tx_info(tx_hash).is_some(), "block 2 tx should remain"); + } + } + + #[test] + fn test_confirm_cache_clear_cleans_tx_index() { + let mut cache = TestConfirmCache::new(); + let (block, receipts) = make_executed_block_with_txs(1, B256::ZERO, 0, 2); + let tx_hashes: Vec<_> = + block.recovered_block.body().transactions().map(|tx| *tx.tx_hash()).collect(); + cache.insert(1, block, receipts).expect("insert"); + + cache.clear(); + for tx_hash in &tx_hashes { + assert!(cache.get_tx_info(tx_hash).is_none()); + } + } +} diff --git a/crates/flashblocks/src/cache/pending.rs b/crates/flashblocks/src/cache/pending.rs index 946cabf0..d0655be1 100644 --- a/crates/flashblocks/src/cache/pending.rs +++ b/crates/flashblocks/src/cache/pending.rs @@ -64,203 +64,139 @@ impl PendingSequence { #[cfg(test)] mod tests { - use op_alloy_rpc_types_engine::OpFlashblockPayload; + use super::*; + use alloy_consensus::{BlockHeader, Header, Receipt, TxEip7702}; + use alloy_primitives::{Address, Bytes, PrimitiveSignature, B256, U256}; + use op_alloy_consensus::{OpReceipt, OpTypedTransaction}; + use reth_chain_state::{ComputedTrieData, ExecutedBlock}; + use reth_execution_types::{BlockExecutionOutput, BlockExecutionResult}; + use reth_optimism_primitives::{OpBlock, OpPrimitives, OpTransactionSigned}; + use reth_primitives_traits::{RecoveredBlock, SealedBlock, SealedHeader}; + use reth_rpc_eth_types::PendingBlock; + use std::{collections::HashMap, sync::Arc, time::Instant}; - #[test] - fn test_flashblock_serde_roundtrip() { - let raw = r#"{ - "diff": { - "block_hash": "0x2d902e3fcb5bd57e0bf878cbbda1386e7fb8968d518912d58678a35e58261c46", - "gas_used": "0x2907796", - "logs_bloom": "0x5c21065292452cfcd5175abfee20e796773da578307356043ba4f62692aca01204e8908f97ab9df43f1e9c57f586b1c9a7df8b66ffa7746dfeeb538617fea5eb75ad87f8b6653f597d86814dc5ad6de404e5a48aeffcc4b1e170c2bdbc7a334936c66166ba0faa6517597b676ef65c588342756f280f7d610aa3ed35c5d877449bfacbdb9b40d98c457f974ab264ec40e4edd6e9fab4c0cb794bf75f10ea20dab75a1f9fd1c441d4c365d1476841e8593f1d1b9a1c52919a0fcf9fc5eef2ef82fe80971a72d1cde1cb195db4806058a229e88acfddfe1a1308adb6f69afa3aaf67f4bd49e93e9f9532ea30bd891a8ff08de61fb645bec678db816950b47fcef0", - "receipts_root": "0x2c4203e9aa87258627bf23ab4d5f9d92da30285ea11dc0b3e140a5a8d4b63e26", - "state_root": "0x0000000000000000000000000000000000000000000000000000000000000000", - "transactions": [ - "0x02f8c2822105830b0c58840b677c0f840c93fb5a834c4b4094d599955d17a1378651e76557ffc406c71300fcb080b851020026000100271000c8e9d514f85b57b70de033e841d788ab4df1acd691802acc26dcd13fb9e38fa8e10001004e2000c8e9d55bd42770e29cb76904377ffdb22737fc9f5eb36fde875fcbfa687b1c3023c080a07e8486ab3db9f07588a3f37bd8ffb9b349ba9bb738a2500d78a4583e1e54a6f9a068d0b3c729a6777c81dd49bd0c2dc3a079f0ceed4e778fbfe79176e8b70d68d8", - "0xf90fae820248840158a3c58307291a94bbbfd134e9b44bfb5123898ba36b01de7ab93d9880b90f443087505600000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000e0000000000000000000000000000000000000000000000000000000000000012000000000000000000000000001c2c79343de52f99538cd2cbbd67ba0813f403000000000000000000000000001c2c79343de52f99538cd2cbbd67ba0813f40300000000000000000000000000000000000000000000000000000000000000001000000000000000000000000833589fcd6edb6e08f4c7c32d4f71b54bda0291300000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000004b2ee6f00000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000018000000000000000000000000000000000000000000000000000000000000003600000000000000000000000000000000000000000000000000000000000000c00000000000000000000000000833589fcd6edb6e08f4c7c32d4f71b54bda029130000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000044095ea7b30000000000000000000000000000000000001ff3684f28c67538d4d072c227340000000000000000000000000000000000000000000000000000000004b2ee6f00000000000000000000000000000000000000000000000000000000000000000000000000000000f5042e6ffac5a625d4e7848e0b01373d8eb9e22200000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000001243b2253c8000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000e00000000000000000000000000000000000000000000000000000000000000001000000000000000000000000833589fcd6edb6e08f4c7c32d4f71b54bda029130000000000000000000000000000000000000000000000000000000000000001000000000000000000000000f70da97812cb96acdf810712aa562db8dfa3dbef000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000133f4000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001ff3684f28c67538d4d072c2273400000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000007e42213bc0b000000000000000000000000ea758cac6115309b325c582fd0782d79e3502177000000000000000000000000833589fcd6edb6e08f4c7c32d4f71b54bda029130000000000000000000000000000000000000000000000000000000004b1ba7b000000000000000000000000ea758cac6115309b325c582fd0782d79e350217700000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000007041fff991f000000000000000000000000f5042e6ffac5a625d4e7848e0b01373d8eb9e222000000000000000000000000d9aaec86b65d86f6a7b5b1b0c42ffa531710b6ca0000000000000000000000000000000000000000000000000000000004b06d9200000000000000000000000000000000000000000000000000000000000000a0d311e79cd2099f6f1f0607040000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000000000000000000000000000000000000600000000000000000000000000000000000000000000000000000000000000180000000000000000000000000000000000000000000000000000000000000058000000000000000000000000000000000000000000000000000000000000000e4c1fb425e000000000000000000000000ea758cac6115309b325c582fd0782d79e3502177000000000000000000000000833589fcd6edb6e08f4c7c32d4f71b54bda029130000000000000000000000000000000000000000000000000000000004b1ba7b00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000069073bb900000000000000000000000000000000000000000000000000000000000000c000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003c438c9c147000000000000000000000000833589fcd6edb6e08f4c7c32d4f71b54bda029130000000000000000000000000000000000000000000000000000000000002710000000000000000000000000ba12222222228d8ba445958a75a0704d566bf2c800000000000000000000000000000000000000000000000000000000000001c400000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000002e4945bcec9000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001200000000000000000000000000000000000000000000000000000000000000220000000000000000000000000ea758cac6115309b325c582fd0782d79e35021770000000000000000000000000000000000000000000000000000000000000000000000000000000000000000ea758cac6115309b325c582fd0782d79e3502177000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002800000000000000000000000000000000000000000000000000000000069073bb9000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000208f360baf899845441eccdc46525e26bb8860752a0002000000000000000001cd000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000004b1ba7b00000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002000000000000000000000000833589fcd6edb6e08f4c7c32d4f71b54bda02913000000000000000000000000d9aaec86b65d86f6a7b5b1b0c42ffa531710b6ca00000000000000000000000000000000000000000000000000000000000000027fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008434ee90ca000000000000000000000000f5c4f3dc02c3fb9279495a8fef7b0741da956157000000000000000000000000d9aaec86b65d86f6a7b5b1b0c42ffa531710b6ca0000000000000000000000000000000000000000000000000000000004b1a7880000000000000000000000000000000000000000000000000000000000002710000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000f5042e6ffac5a625d4e7848e0b01373d8eb9e22200000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000001243b2253c8000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000e00000000000000000000000000000000000000000000000000000000000000001000000000000000000000000d9aaec86b65d86f6a7b5b1b0c42ffa531710b6ca000000000000000000000000000000000000000000000000000000000000000100000000000000000000000001c2c79343de52f99538cd2cbbd67ba0813f403000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002887696e8edbbcbd7306955512ff6f2d8426403eef4762157da3e9c5a89d78f682422da0c8d8b1aa1c9bfd1fe1e4a10c6123caa2fe582294aa5798c54546faa4c09590a9a012a1c78fca9cfefd281c1e44682de3c4420299da5cf2ae498f67d7de7dcf166c", - "0x02f8f582210582a649831db02984026c1a34833d090094f2cb4e685946beecbc9ce5f318b68edc583bcfa080b88600000000000069073af31c4289d066d04f33681f6686155c8243dff963557765630a39bdd8c54e6b7dbe5d4b689e9d536608db03163882cf005f7b5813e41d2fdec75161c8470a410c4c9201000202b6e39c63c7e4ebc01d51f845dfc9cff3f5adf9ef2710000000000103cd1f9777571493aeacb7eae45cd30a226d3e612d4e200000000000c080a088fd1a2b2e5891109afc3845b2c8b0ca76ea8306190dcb80a703a2451f7bab25a0718ae373e36c8ddb2b934ca936ed824db22c0625cfea29be3d408ff41787fc8c", - "0x02f9030b822105830536f9830f58ab84025c6b93833d090094c90d989d809e26b2d95fb72eb3288fef72af8c2f80b9029a00000000000069073af31c3d4d0646e102b6f958428cd8ed562efa6efb234f629b5f6ca52a15fd2e33aea76eb64fb04cae81b3e5b769dbdc681dcfd4b7a802a2cacdf1ccb65276a722c67607000202b6e39c63c7e4ebc01d51f845dfc9cff3f5adf9ef2710000000000103cd1f9777571493aeacb7eae45cd30a226d3e612d4e200000000000010206777762d3eb91810b15526c2c9102864d722ef7a9ed24e77271c1dcbf0fdcba68138800000000010698c8f03094a9e65ccedc14c40130e4a5dd0ce14fb12ea58cbeac11f662b458b9271000000000000003045a9ad2bb92b0b3e5c571fdd5125114e04e02be1a0bb80000000001036e55486ea6b8691ba58224f3cae35505add86c372710000000000003681d6e4b0b020656ca04956ddaf76add7ef022f60dac00000000010003028be0fcdd7cf0b53b7b82b8f6ea8586d07c53359f2710000000000006c30e25679d5c77b257ac3a61ad08603b11e7afe77ac9222a5386c27d08b6b6c3ea6000000000010696d4b53a38337a5733179751781178a2613306063c511b78cd02684739288c0a01f400000000000002020d028b2d7a29d2e57efc6405a1dce1437180e3ce27100000000001068a71465e76d736564b0c90f5cf3d0d7b69c461c36f69250ae27dbead147cc8f80bb80000000000000206354def8b7e6b2ee04bf85c00f5e79f173d0b76d5017bab3a90c7ba62e1722699000000000000010245f3ad9e63f629be6e278cc4cf34d3b0a79a4a0b27100000000000010404b154dbcd3c75580382c2353082df4390613d93c627120000000001011500cc7d9c2b460720a48cc7444d7e7dfe43f6050bb80a03000000015c8dec5f0eedf1f8934815ef8fb8cb8198eac6520bb80a030000010286f3dd3b4d08de718d7909b0fdc16f4cbdf94ef527100000000000c001a0d4c12f6433ff6ea0573633364c030d8b46ed5764494f80eb434f27060c39f315a034df82c4ac185a666280d578992feee0c05fc75d93e3e2286726c85fba1bb0a0", - "0x02f8f68221058305c7b3830f4ef58401a5485d832dc6c094f2cb4e685946beecbc9ce5f318b68edc583bcfa080b88600000000000069073af31b777ac6b2082fc399fde92a814114b7896ca0b0503106910ea099d5e32c93bfc0013ed2850534c3f8583ab7276414416c0d15ac021126f6cb6ca1ed091ddc01eb01000202b6e39c63c7e4ebc01d51f845dfc9cff3f5adf9ef2710000000000103cd1f9777571493aeacb7eae45cd30a226d3e612d4e200000000000c080a09694b95dc893bed698ede415c188db3530ccc98a01d79bb9f11d783de7dddde9a0275b0165ab21ea0e6f721c624aa2270a3f98276ca0c95381d90e3f9d434b4881", - "0x02f8f682210583034573830f4ef58401a5485d832dc6c094f2cb4e685946beecbc9ce5f318b68edc583bcfa080b88600000000000069073af31c970da8f2adb8bafe6d254ec4428f8342508e169f75e8450f6ff8488813dfa638395e16787966f01731fddffd0e7352cde07fd24bba283bd27f1828fb2a0c700701000202b6e39c63c7e4ebc01d51f845dfc9cff3f5adf9ef2710000000000103cd1f9777571493aeacb7eae45cd30a226d3e612d4e200000000000c080a00181afe4bedab67692a9c1ff30a89fde6b3d3c8407a47a2777efcd6bdc0c39d2a022d6a4219e72eebdbc5d31ae998243ccec1b192c5c7c586308ccddb4838cd631", - "0x02f8c1822105830b0cfd830f4ed084013bce1b834c4b4094d599955d17a1378651e76557ffc406c71300fcb080b851020026000100271000c8e9d514f85b57b70de033e841d788ab4df1acd691802acc26dcd13fb9e38fa8e10001004e2000c8e9d55bd42770e29cb76904377ffdb22737fc9f5eb36fde875fcbfa687b1c3023c001a0d87c4e16986db55b8846bccfe7bca824b75216e72d8f92369c46681800285cb2a00ec53251be3c2a0d19884747d123ddb0ada3c0a917b21882e297e95c2294d52a", - "0x02f901d58221058306361d830f4240840163efbc8301546194833589fcd6edb6e08f4c7c32d4f71b54bda0291380b90164cf092995000000000000000000000000d723d9f752c19faf88a5fd2111a38d0cc5d395b00000000000000000000000000b55712de2ce8f93be30d53c03d48ea275cd14d000000000000000000000000000000000000000000000000000000000000003e8000000000000000000000000000000000000000000000000000000006907385e0000000000000000000000000000000000000000000000000000000069073be2bef9866b70d0bb74d8763996eb5967b1b24cd48f7801f94ad80cb49431df6b1d00000000000000000000000000000000000000000000000000000000000000e000000000000000000000000000000000000000000000000000000000000000417c9c2382c6c3f029aa3dcbf1df075366fae7bc9fba7f3729713e0bf4d518951f5340350208db96af23686d9985ce552e3588244456a23ca99ecbcae779ea11e71c00000000000000000000000000000000000000000000000000000000000000c080a0b1090c8c67ca9a49ba3591c72c8851f187bbfc39b1920dff2f6c0157ed1ada39a0265b7f704f4c1b5c2c5ca57f1a4040e1e48878c9ad5f2cca9c4e6669d12989f2", - "0x02f8c1822105830b0c98830f424084013bc18b834c4b4094d599955d17a1378651e76557ffc406c71300fcb080b851020026000100271000c8e9d514f85b57b70de033e841d788ab4df1acd691802acc26dcd13fb9e38fa8e10001004e2000c8e9d55bd42770e29cb76904377ffdb22737fc9f5eb36fde875fcbfa687b1c3023c001a080a96d18ae46b58d9a470846a05b394ab4a49a2e379de1941205684e1ac291f9a01e6d4d2c6bab5bf8b89f1df2d6beb85d9f1b3f3be73ca2b72e4ad2d9da0d12d2", - "0x02f901d48221058231e0830f4240840163efbc8301544d94833589fcd6edb6e08f4c7c32d4f71b54bda0291380b90164cf0929950000000000000000000000001de8dbc2409c4bbf14445b0d404bb894f0c6cff70000000000000000000000008d8fa42584a727488eeb0e29405ad794a105bb9b0000000000000000000000000000000000000000000000000000000000002710000000000000000000000000000000000000000000000000000000006907385d0000000000000000000000000000000000000000000000000000000069073af16b129c414484e011621c44e0b32451fdbd69e63ef4919f427dde08c16cb199b100000000000000000000000000000000000000000000000000000000000000e00000000000000000000000000000000000000000000000000000000000000041ae0a4b618c30f0e5d92d7fe99bb435413b2201711427699fd285f69666396cee76199d4e901cfb298612cb3b8ad06178cefb4136a8bc1be07c01b5fea80e5ec11b00000000000000000000000000000000000000000000000000000000000000c080a0af315068084aae367f00263dbd872908bbb9ceaefd6b792fc48dd357e6bdf8afa01e7f0e5913570394b9648939ef71fc5ac34fe320a2757ec388316731a335e69f", - "0x02f9022f82210583052d0b830f423f84025c5527833d090094c90d989d809e26b2d95fb72eb3288fef72af8c2f80b901be00000000000069073af31cf0f932cecc8c4c6ffffa554a63e8fba251434483ed3903966d2ba5a70121618a1c45bd9ee158192ab8d7e12ce0f447f2848a48aedaa89e0efa8637bb931745de05000202b6e39c63c7e4ebc01d51f845dfc9cff3f5adf9ef2710000000000103cd1f9777571493aeacb7eae45cd30a226d3e612d4e2000000000000003045a9ad2bb92b0b3e5c571fdd5125114e04e02be1a0bb80000000001036e55486ea6b8691ba58224f3cae35505add86c372710000000000003681d6e4b0b020656ca04956ddaf76add7ef022f60dac0000000001010206777762d3eb91810b15526c2c9102864d722ef7a9ed24e77271c1dcbf0fdcba68138800000000010698c8f03094a9e65ccedc14c40130e4a5dd0ce14fb12ea58cbeac11f662b458b9271000000000000002005554419ccd0293d9383901f461c7c3e0c66e925f0bb80000000001028eb9437532fac8d6a7870f3f887b7978d20355fc271000000000000003035d28f920c9d23100e4a38b2ba2d8ae617c3b261501f4000000000102bc51db8aec659027ae0b0e468c0735418161a7800bb8000000000003dbc6998296caa1652a810dc8d3baf4a8294330f100500000000000c080a040000b130b1759df897a9573691a3d1cafacc6d95d0db1826f275afc30e2ff63a0400a7514f8d5383970c4412205ec8e9c6ca06acea504acabd2d3c36e9cb5003d" - ], - "withdrawals": [], - "withdrawals_root": "0x81864c23f426ad807d66c9fdde33213e1fdbac06c1b751d279901d1ce13670ac" - }, - "index": 10, - "metadata": { - "block_number": 37646058, - "new_account_balances": { - "0x000000000022d473030f116ddee9f6b43ac78ba3": "0x0", - "0x0000000071727de22e5e9d8baf0edac6f37da032": "0x23281e39594556899", - "0x0000f90827f1c53a10cb7a02335b175320002935": "0x0", - "0x000f3df6d732807ef1319fb7b8bb8522d0beac02": "0x0" - }, - "receipts": { - "0x1a766690fd6d0febffc488f12fbd7385c43fbe1e07113a1316f22f176355297e": { - "Legacy": { - "cumulativeGasUsed": "0x2868d76", - "logs": [ - { - "address": "0x833589fcd6edb6e08f4c7c32d4f71b54bda02913", - "data": "0x0000000000000000000000000000000000000000000000000000000004b2ee6f", - "topics": [ - "0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef", - "0x00000000000000000000000001c2c79343de52f99538cd2cbbd67ba0813f4030", - "0x000000000000000000000000f5042e6ffac5a625d4e7848e0b01373d8eb9e222" - ] - }, - { - "address": "0x833589fcd6edb6e08f4c7c32d4f71b54bda02913", - "data": "0x0000000000000000000000000000000000000000000000000000000004b2ee6f", - "topics": [ - "0x8c5be1e5ebec7d5bd14f71427d1e84f3dd0314c0f7b2291e5b200ac8c7c3b925", - "0x000000000000000000000000f5042e6ffac5a625d4e7848e0b01373d8eb9e222", - "0x0000000000000000000000000000000000001ff3684f28c67538d4d072c22734" - ] - }, - { - "address": "0xf5042e6ffac5a625d4e7848e0b01373d8eb9e222", - "data": "0x000000000000000000000000833589fcd6edb6e08f4c7c32d4f71b54bda02913000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000044095ea7b30000000000000000000000000000000000001ff3684f28c67538d4d072c227340000000000000000000000000000000000000000000000000000000004b2ee6f00000000000000000000000000000000000000000000000000000000", - "topics": [ - "0x93485dcd31a905e3ffd7b012abe3438fa8fa77f98ddc9f50e879d3fa7ccdc324" - ] - }, - { - "address": "0x833589fcd6edb6e08f4c7c32d4f71b54bda02913", - "data": "0x00000000000000000000000000000000000000000000000000000000000133f4", - "topics": [ - "0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef", - "0x000000000000000000000000f5042e6ffac5a625d4e7848e0b01373d8eb9e222", - "0x000000000000000000000000f70da97812cb96acdf810712aa562db8dfa3dbef" - ] - }, - { - "address": "0xf5042e6ffac5a625d4e7848e0b01373d8eb9e222", - "data": "0x000000000000000000000000f5042e6ffac5a625d4e7848e0b01373d8eb9e2220000000000000000000000000000000000000000000000000000000000000060000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001243b2253c8000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000e00000000000000000000000000000000000000000000000000000000000000001000000000000000000000000833589fcd6edb6e08f4c7c32d4f71b54bda029130000000000000000000000000000000000000000000000000000000000000001000000000000000000000000f70da97812cb96acdf810712aa562db8dfa3dbef000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000133f400000000000000000000000000000000000000000000000000000000", - "topics": [ - "0x93485dcd31a905e3ffd7b012abe3438fa8fa77f98ddc9f50e879d3fa7ccdc324" - ] - }, - { - "address": "0x833589fcd6edb6e08f4c7c32d4f71b54bda02913", - "data": "0x0000000000000000000000000000000000000000000000000000000004b1ba7b", - "topics": [ - "0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef", - "0x000000000000000000000000f5042e6ffac5a625d4e7848e0b01373d8eb9e222", - "0x000000000000000000000000ea758cac6115309b325c582fd0782d79e3502177" - ] - }, - { - "address": "0x8f360baf899845441eccdc46525e26bb8860752a", - "data": "0x00000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000001957cc57b7a9959c0000000000000000000000000000000000000000000000001957cc57b7a9959800000000000000000000000000000000000000000000000444e308096a22c339000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000092458cc3a866f04600000000000000000000000000000000000000000000000025f3e27916e84b59000", - "topics": [ - "0x4e1d56f7310a8c32b2267f756b19ba65019b4890068ce114a25009abe54de5ba" - ] - }, - { - "address": "0xba12222222228d8ba445958a75a0704d566bf2c8", - "data": "0x0000000000000000000000000000000000000000000000000000000004b1ba7b0000000000000000000000000000000000000000000000000000000004b1a44c", - "topics": [ - "0x2170c741c41531aec20e7c107c24eecfdd15e69c9bb0a8dd37b1840b9e0b207b", - "0x8f360baf899845441eccdc46525e26bb8860752a0002000000000000000001cd", - "0x000000000000000000000000833589fcd6edb6e08f4c7c32d4f71b54bda02913", - "0x000000000000000000000000d9aaec86b65d86f6a7b5b1b0c42ffa531710b6ca" - ] - }, - { - "address": "0x833589fcd6edb6e08f4c7c32d4f71b54bda02913", - "data": "0x0000000000000000000000000000000000000000000000000000000004b1ba7b", - "topics": [ - "0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef", - "0x000000000000000000000000ea758cac6115309b325c582fd0782d79e3502177", - "0x000000000000000000000000ba12222222228d8ba445958a75a0704d566bf2c8" - ] - }, - { - "address": "0xd9aaec86b65d86f6a7b5b1b0c42ffa531710b6ca", - "data": "0x0000000000000000000000000000000000000000000000000000000004b1a44c", - "topics": [ - "0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef", - "0x000000000000000000000000ba12222222228d8ba445958a75a0704d566bf2c8", - "0x000000000000000000000000ea758cac6115309b325c582fd0782d79e3502177" - ] + fn make_executed_block(block_number: u64, parent_hash: B256) -> ExecutedBlock { + let header = Header { number: block_number, parent_hash, ..Default::default() }; + let sealed_header = SealedHeader::seal_slow(header); + let block = OpBlock::new(sealed_header.unseal(), Default::default()); + let sealed_block = SealedBlock::seal_slow(block); + let recovered_block = RecoveredBlock::new_sealed(sealed_block, vec![]); + let execution_output = Arc::new(BlockExecutionOutput { + result: BlockExecutionResult { + receipts: vec![], + requests: Default::default(), + gas_used: 0, + blob_gas_used: 0, }, - { - "address": "0xd9aaec86b65d86f6a7b5b1b0c42ffa531710b6ca", - "data": "0x0000000000000000000000000000000000000000000000000000000004b1a44c", - "topics": [ - "0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef", - "0x000000000000000000000000ea758cac6115309b325c582fd0782d79e3502177", - "0x000000000000000000000000f5042e6ffac5a625d4e7848e0b01373d8eb9e222" - ] - }, - { - "address": "0xf5042e6ffac5a625d4e7848e0b01373d8eb9e222", - "data": "0x0000000000000000000000000000000000001ff3684f28c67538d4d072c227340000000000000000000000000000000000000000000000000000000000000060000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000007e42213bc0b000000000000000000000000ea758cac6115309b325c582fd0782d79e3502177000000000000000000000000833589fcd6edb6e08f4c7c32d4f71b54bda029130000000000000000000000000000000000000000000000000000000004b1ba7b000000000000000000000000ea758cac6115309b325c582fd0782d79e350217700000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000007041fff991f000000000000000000000000f5042e6ffac5a625d4e7848e0b01373d8eb9e222000000000000000000000000d9aaec86b65d86f6a7b5b1b0c42ffa531710b6ca0000000000000000000000000000000000000000000000000000000004b06d9200000000000000000000000000000000000000000000000000000000000000a0d311e79cd2099f6f1f0607040000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000000000000000000000000000000000000600000000000000000000000000000000000000000000000000000000000000180000000000000000000000000000000000000000000000000000000000000058000000000000000000000000000000000000000000000000000000000000000e4c1fb425e000000000000000000000000ea758cac6115309b325c582fd0782d79e3502177000000000000000000000000833589fcd6edb6e08f4c7c32d4f71b54bda029130000000000000000000000000000000000000000000000000000000004b1ba7b00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000069073bb900000000000000000000000000000000000000000000000000000000000000c000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003c438c9c147000000000000000000000000833589fcd6edb6e08f4c7c32d4f71b54bda029130000000000000000000000000000000000000000000000000000000000002710000000000000000000000000ba12222222228d8ba445958a75a0704d566bf2c800000000000000000000000000000000000000000000000000000000000001c400000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000002e4945bcec9000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001200000000000000000000000000000000000000000000000000000000000000220000000000000000000000000ea758cac6115309b325c582fd0782d79e35021770000000000000000000000000000000000000000000000000000000000000000000000000000000000000000ea758cac6115309b325c582fd0782d79e3502177000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002800000000000000000000000000000000000000000000000000000000069073bb9000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000208f360baf899845441eccdc46525e26bb8860752a0002000000000000000001cd000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000004b1ba7b00000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002000000000000000000000000833589fcd6edb6e08f4c7c32d4f71b54bda02913000000000000000000000000d9aaec86b65d86f6a7b5b1b0c42ffa531710b6ca00000000000000000000000000000000000000000000000000000000000000027fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008434ee90ca000000000000000000000000f5c4f3dc02c3fb9279495a8fef7b0741da956157000000000000000000000000d9aaec86b65d86f6a7b5b1b0c42ffa531710b6ca0000000000000000000000000000000000000000000000000000000004b1a7880000000000000000000000000000000000000000000000000000000000002710000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", - "topics": [ - "0x93485dcd31a905e3ffd7b012abe3438fa8fa77f98ddc9f50e879d3fa7ccdc324" - ] - }, - { - "address": "0xd9aaec86b65d86f6a7b5b1b0c42ffa531710b6ca", - "data": "0x0000000000000000000000000000000000000000000000000000000004b1a44c", - "topics": [ - "0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef", - "0x000000000000000000000000f5042e6ffac5a625d4e7848e0b01373d8eb9e222", - "0x00000000000000000000000001c2c79343de52f99538cd2cbbd67ba0813f4030" - ] - }, - { - "address": "0xf5042e6ffac5a625d4e7848e0b01373d8eb9e222", - "data": "0x000000000000000000000000f5042e6ffac5a625d4e7848e0b01373d8eb9e2220000000000000000000000000000000000000000000000000000000000000060000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001243b2253c8000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000e00000000000000000000000000000000000000000000000000000000000000001000000000000000000000000d9aaec86b65d86f6a7b5b1b0c42ffa531710b6ca000000000000000000000000000000000000000000000000000000000000000100000000000000000000000001c2c79343de52f99538cd2cbbd67ba0813f40300000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", - "topics": [ - "0x93485dcd31a905e3ffd7b012abe3438fa8fa77f98ddc9f50e879d3fa7ccdc324" - ] - } - ], - "status": "0x1" - } - }, - "0x2cd6b4825b5ee40b703c947e15630336dceda97825b70412da54ccc27f484496": { - "Eip1559": { - "cumulativeGasUsed": "0x28cca69", - "logs": [ - { - "address": "0x833589fcd6edb6e08f4c7c32d4f71b54bda02913", - "data": "0x", - "topics": [ - "0x98de503528ee59b575ef0c0a2576a82497bfc029a5685b209e9ec333479b10a5", - "0x000000000000000000000000d723d9f752c19faf88a5fd2111a38d0cc5d395b0", - "0xbef9866b70d0bb74d8763996eb5967b1b24cd48f7801f94ad80cb49431df6b1d" - ] - }, - { - "address": "0x833589fcd6edb6e08f4c7c32d4f71b54bda02913", - "data": "0x00000000000000000000000000000000000000000000000000000000000003e8", - "topics": [ - "0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef", - "0x000000000000000000000000d723d9f752c19faf88a5fd2111a38d0cc5d395b0", - "0x0000000000000000000000000b55712de2ce8f93be30d53c03d48ea275cd14d0" - ] - } - ], - "status": "0x1" + state: Default::default(), + }); + ExecutedBlock::new(Arc::new(recovered_block), execution_output, ComputedTrieData::default()) + } + + fn make_pending_sequence(block_number: u64) -> PendingSequence { + let executed = make_executed_block(block_number, B256::ZERO); + let block_hash = executed.recovered_block.hash(); + let parent_hash = executed.recovered_block.parent_hash(); + let pending_block = PendingBlock::with_executed_block(Instant::now(), executed); + PendingSequence::new( + pending_block, + HashMap::new(), + Default::default(), + block_hash, + parent_hash, + 0, + ) + } + + fn mock_tx(nonce: u64) -> OpTransactionSigned { + let tx = TxEip7702 { + chain_id: 1u64, + nonce, + max_fee_per_gas: 0x28f000fff, + max_priority_fee_per_gas: 0x28f000fff, + gas_limit: 21_000, + to: Address::default(), + value: U256::ZERO, + input: Bytes::new(), + access_list: Default::default(), + authorization_list: Default::default(), + }; + let signature = PrimitiveSignature::new(U256::default(), U256::default(), true); + OpTransactionSigned::new_unhashed(OpTypedTransaction::Eip7702(tx), signature) + } + + fn make_pending_sequence_with_txs( + block_number: u64, + tx_count: usize, + ) -> PendingSequence { + use alloy_consensus::transaction::TxHashRef; + + let executed = make_executed_block(block_number, B256::ZERO); + let block_hash = executed.recovered_block.hash(); + let parent_hash = executed.recovered_block.parent_hash(); + + let mut tx_index = HashMap::new(); + for i in 0..tx_count { + let tx = mock_tx(i as u64); + let tx_hash = *tx.tx_hash(); + let receipt = OpReceipt::Eip7702(Receipt { + status: true.into(), + cumulative_gas_used: 21_000 * (i as u64 + 1), + logs: vec![], + }); + tx_index.insert( + tx_hash, + CachedTxInfo { block_number, block_hash, tx_index: i as u64, tx, receipt }, + ); } - } + + let pending_block = PendingBlock::with_executed_block(Instant::now(), executed); + PendingSequence::new( + pending_block, + tx_index, + Default::default(), + block_hash, + parent_hash, + 0, + ) + } + + #[test] + fn test_pending_sequence_get_hash_returns_stored_block_hash() { + let seq = make_pending_sequence(42); + assert_eq!(seq.get_hash(), seq.block_hash); + } + + #[test] + fn test_pending_sequence_get_height_returns_block_number() { + let seq = make_pending_sequence(99); + assert_eq!(seq.get_height(), 99); + } + + #[test] + fn test_pending_sequence_get_block_and_receipts_empty_receipts_on_no_tx_block() { + let seq = make_pending_sequence(3); + let bar = seq.get_block_and_receipts(); + assert!(bar.receipts.is_empty()); } - }, - "payload_id": "0x0316ecb1aa1671b5" -}"#; - let flashblock: OpFlashblockPayload = serde_json::from_str(raw).expect("deserialize"); - let serialized = serde_json::to_string(&flashblock).expect("serialize"); - let roundtrip: OpFlashblockPayload = serde_json::from_str(&serialized).expect("roundtrip"); + #[test] + fn test_pending_sequence_get_tx_info_returns_none_for_unknown_hash() { + let seq = make_pending_sequence_with_txs(10, 2); + assert!(seq.get_tx_info(&B256::repeat_byte(0xFF)).is_none()); + } + + #[test] + fn test_pending_sequence_get_tx_info_returns_correct_info_for_known_tx() { + use alloy_consensus::transaction::TxHashRef; - assert_eq!(flashblock, roundtrip); + let seq = make_pending_sequence_with_txs(42, 3); + let (tx_hash, expected_info) = seq.tx_index.iter().next().unwrap(); + let (info, bar) = seq.get_tx_info(tx_hash).expect("known tx hash should return Some"); + assert_eq!(info.block_number, 42); + assert_eq!(info.block_hash, seq.block_hash); + assert_eq!(info.tx_index, expected_info.tx_index); + assert_eq!(*info.tx.tx_hash(), *tx_hash); + assert_eq!(bar.block.number(), 42); } } diff --git a/crates/flashblocks/src/cache/raw.rs b/crates/flashblocks/src/cache/raw.rs index 5aa5e754..86afa3ea 100644 --- a/crates/flashblocks/src/cache/raw.rs +++ b/crates/flashblocks/src/cache/raw.rs @@ -187,3 +187,607 @@ impl RawFlashblocksEntry { self.recovered_transactions_by_index.values().map(Vec::len).sum() } } + +#[cfg(test)] +mod tests { + use super::*; + use crate::test_utils::TestFlashBlockFactory; + use reth_optimism_primitives::OpTransactionSigned; + + type TestRawCache = RawFlashblocksCacheInner; + + // ===== RawFlashblocksEntry tests via RawFlashblocksCacheInner ===== + + // --- can_accept --- + + #[test] + fn test_raw_entry_can_accept_first_flashblock_on_empty_entry() { + // Arrange + let factory = TestFlashBlockFactory::new(); + let fb0 = factory.flashblock_at(0).build(); + let mut cache = TestRawCache::new(); + + // Act + let result = cache.handle_flashblock(fb0); + + // Assert: empty entry accepts anything without error + assert!(result.is_ok(), "empty entry should accept first flashblock"); + assert_eq!(cache.cache.len(), 1); + } + + #[test] + fn test_raw_entry_rejects_duplicate_index_in_same_sequence() { + let factory = TestFlashBlockFactory::new(); + let fb0 = factory.flashblock_at(0).build(); + let fb0_dup = factory.flashblock_at(0).build(); + let mut cache = TestRawCache::new(); + + cache.handle_flashblock(fb0).expect("first flashblock should succeed"); + let result = cache.handle_flashblock(fb0_dup); + assert!(result.is_err(), "duplicate index within same sequence should be rejected"); + } + + #[test] + fn test_raw_entry_rejects_mismatched_block_number() { + let factory = TestFlashBlockFactory::new(); + let fb0 = factory.flashblock_at(0).build(); + let payload_id = fb0.payload_id; + let mut cache = TestRawCache::new(); + + cache.handle_flashblock(fb0).expect("first flashblock should succeed"); + let fb_wrong_block = factory + .builder() + .index(1) + .block_number(999) // different block number + .payload_id(payload_id) + .build(); + let result = cache.handle_flashblock(fb_wrong_block); + assert!(result.is_ok(), "mismatched block number creates a new entry"); + assert_eq!(cache.cache.len(), 2, "should have two distinct entries"); + } + + #[test] + fn test_raw_entry_accepts_out_of_order_flashblocks_within_same_sequence() { + let factory = TestFlashBlockFactory::new(); + let fb0 = factory.flashblock_at(0).build(); + let payload_id = fb0.payload_id; + let fb2 = factory.builder().index(2).block_number(100).payload_id(payload_id).build(); + let mut cache = TestRawCache::new(); + + cache.handle_flashblock(fb0).expect("fb0 insert"); + let result = cache.handle_flashblock(fb2); + assert!(result.is_ok(), "out-of-order unique index should be accepted"); + } + + #[test] + fn test_raw_entry_get_best_revision_returns_none_without_base() { + let factory = TestFlashBlockFactory::new(); + let fb1 = factory.builder().index(1).block_number(100).build(); + let mut cache = TestRawCache::new(); + + cache.handle_flashblock(fb1).expect("fb1 insert"); + let entry = cache.cache.iter().next().expect("entry should exist"); + let best = entry.get_best_revision(); + assert!(best.is_none(), "get_best_revision should return None without base (index 0)"); + } + + #[test] + fn test_raw_entry_get_best_revision_returns_zero_with_only_base() { + let factory = TestFlashBlockFactory::new(); + let fb0 = factory.flashblock_at(0).build(); + + let mut cache = TestRawCache::new(); + cache.handle_flashblock(fb0).expect("fb0 insert"); + let entry = cache.cache.iter().next().expect("entry should exist"); + let best = entry.get_best_revision(); + assert_eq!(best, Some(0), "only index 0 → best revision is 0"); + } + + #[test] + fn test_raw_entry_get_best_revision_with_consecutive_sequence() { + let factory = TestFlashBlockFactory::new(); + let fb0 = factory.flashblock_at(0).build(); + let fb1 = factory.flashblock_after(&fb0).build(); + let fb2 = factory.flashblock_after(&fb1).build(); + let fb3 = factory.flashblock_after(&fb2).build(); + let mut cache = TestRawCache::new(); + + cache.handle_flashblock(fb0).expect("fb0"); + cache.handle_flashblock(fb1).expect("fb1"); + cache.handle_flashblock(fb2).expect("fb2"); + cache.handle_flashblock(fb3).expect("fb3"); + let entry = cache.cache.iter().next().expect("entry should exist"); + let best = entry.get_best_revision(); + assert_eq!(best, Some(3), "consecutive 0..3 → best revision 3"); + } + + #[test] + fn test_raw_entry_get_best_revision_stops_at_gap() { + let factory = TestFlashBlockFactory::new(); + let fb0 = factory.flashblock_at(0).build(); + let payload_id = fb0.payload_id; + let fb1 = factory.flashblock_after(&fb0).build(); + let fb3 = factory.builder().index(3).block_number(100).payload_id(payload_id).build(); + let mut cache = TestRawCache::new(); + + cache.handle_flashblock(fb0).expect("fb0"); + cache.handle_flashblock(fb1).expect("fb1"); + cache.handle_flashblock(fb3).expect("fb3 (gap after index 1)"); + let entry = cache.cache.iter().next().expect("entry should exist"); + let best = entry.get_best_revision(); + assert_eq!(best, Some(1), "gap between 1 and 3 → best revision is 1"); + } + + #[test] + fn test_raw_cache_handle_canonical_height_evicts_entries_at_or_below_height() { + let factory = TestFlashBlockFactory::new(); + let fb100 = factory.flashblock_at(0).build(); + let fb101 = factory.flashblock_for_next_block(&fb100).build(); + let mut cache = TestRawCache::new(); + + cache.handle_flashblock(fb100).expect("fb100"); + cache.handle_flashblock(fb101).expect("fb101"); + assert_eq!(cache.cache.len(), 2); + cache.handle_canonical_height(100); + assert_eq!(cache.cache.len(), 1, "block 100 entry should be evicted"); + let remaining = cache.cache.iter().next().expect("one entry should remain"); + assert_eq!(remaining.block_number(), Some(101), "remaining entry should be for block 101"); + } + + #[test] + fn test_raw_cache_handle_canonical_height_evicts_multiple_entries() { + // Arrange: insert flashblocks for blocks 100, 101, 102 + let factory = TestFlashBlockFactory::new(); + let fb100 = factory.flashblock_at(0).build(); + let fb101 = factory.flashblock_for_next_block(&fb100).build(); + let fb102 = factory.flashblock_for_next_block(&fb101).build(); + let mut cache = TestRawCache::new(); + + cache.handle_flashblock(fb100).expect("fb100"); + cache.handle_flashblock(fb101).expect("fb101"); + cache.handle_flashblock(fb102).expect("fb102"); + assert_eq!(cache.cache.len(), 3); + cache.handle_canonical_height(102); + assert_eq!(cache.cache.len(), 0, "all entries at or below height 102 should be evicted"); + } + + #[test] + fn test_raw_cache_handle_canonical_height_keeps_entries_above_height() { + let factory = TestFlashBlockFactory::new(); + let fb100 = factory.flashblock_at(0).build(); + let fb101 = factory.flashblock_for_next_block(&fb100).build(); + let mut cache = TestRawCache::new(); + + cache.handle_flashblock(fb100).expect("fb100"); + cache.handle_flashblock(fb101).expect("fb101"); + cache.handle_canonical_height(99); + assert_eq!(cache.cache.len(), 2, "no entries should be evicted below their block numbers"); + } + + #[test] + fn test_raw_cache_rejects_flashblock_at_or_below_canonical_height() { + let factory = TestFlashBlockFactory::new(); + let fb100 = factory.flashblock_at(0).build(); + let mut cache = TestRawCache::new(); + + cache.handle_canonical_height(100); + let result = cache.handle_flashblock(fb100); + assert!(result.is_ok(), "flashblock at canonical height returns Ok"); + assert_eq!(cache.cache.len(), 0, "flashblock at canonical height should not be inserted"); + } + + #[test] + fn test_raw_cache_groups_flashblocks_by_payload_id() { + let factory = TestFlashBlockFactory::new(); + let fb0_seq1 = factory.flashblock_at(0).build(); + let fb0_seq2 = factory.flashblock_for_next_block(&fb0_seq1).build(); + let mut cache = TestRawCache::new(); + + cache.handle_flashblock(fb0_seq1.clone()).expect("seq1 fb0"); + cache.handle_flashblock(fb0_seq2.clone()).expect("seq2 fb0"); + let fb1_seq1 = factory.flashblock_after(&fb0_seq1).build(); + cache.handle_flashblock(fb1_seq1).expect("seq1 fb1"); + let entries: Vec<_> = cache.cache.iter().collect(); + assert_eq!(entries.len(), 2, "should have two separate entries for two payload_ids"); + } + + #[test] + fn test_raw_cache_ring_buffer_evicts_oldest_entry_when_full() { + let factory = TestFlashBlockFactory::new(); + let mut prev_fb = factory.flashblock_at(0).build(); + let first_block_num = prev_fb.metadata.block_number; + let mut cache = TestRawCache::new(); + cache.handle_flashblock(prev_fb.clone()).expect("first fb"); + + // Fill up to MAX_RAW_CACHE_SIZE (10) unique sequences + for _ in 1..MAX_RAW_CACHE_SIZE { + let next_fb = factory.flashblock_for_next_block(&prev_fb).build(); + cache.handle_flashblock(next_fb.clone()).expect("fill fb"); + prev_fb = next_fb; + } + assert_eq!(cache.cache.len(), MAX_RAW_CACHE_SIZE, "cache should be at max capacity"); + + // Insert one more sequence to trigger FIFO eviction + let overflow_fb = factory.flashblock_for_next_block(&prev_fb).build(); + let overflow_block_num = overflow_fb.metadata.block_number; + cache.handle_flashblock(overflow_fb).expect("overflow fb"); + + // Assert: cache is still at max size (oldest entry evicted) + assert_eq!(cache.cache.len(), MAX_RAW_CACHE_SIZE, "cache size should remain at max"); + // The oldest entry (first_block_num) should have been evicted + let has_first = cache.cache.iter().any(|e| e.block_number() == Some(first_block_num)); + let has_overflow = cache.cache.iter().any(|e| e.block_number() == Some(overflow_block_num)); + assert!(!has_first, "oldest entry should have been evicted"); + assert!(has_overflow, "newest entry should be present"); + } + + #[test] + fn test_raw_entry_block_number_returns_none_on_empty() { + let entry = RawFlashblocksEntry::::new(); + assert!(entry.block_number().is_none()); + } + + #[test] + fn test_raw_entry_block_number_returns_correct_value_after_insert() { + let factory = TestFlashBlockFactory::new(); + let fb0 = factory.flashblock_at(0).build(); + let expected_block = fb0.metadata.block_number; + let mut cache = TestRawCache::new(); + + cache.handle_flashblock(fb0).expect("fb0 insert"); + let entry = cache.cache.iter().next().expect("entry should exist"); + assert_eq!(entry.block_number(), Some(expected_block)); + } + + #[test] + fn test_raw_entry_transaction_count_is_zero_on_empty_flashblock() { + let factory = TestFlashBlockFactory::new(); + let fb0 = factory.flashblock_at(0).build(); // no transactions set + let mut cache = TestRawCache::new(); + + cache.handle_flashblock(fb0).expect("fb0 insert"); + let entry = cache.cache.iter().next().expect("entry should exist"); + assert_eq!(entry.transaction_count(), 0, "flashblock with no txs should have count 0"); + } + + #[test] + fn test_raw_entry_has_base_set_after_inserting_index_zero() { + let factory = TestFlashBlockFactory::new(); + let fb0 = factory.flashblock_at(0).build(); + let mut cache = TestRawCache::new(); + + cache.handle_flashblock(fb0).expect("fb0 insert"); + let entry = cache.cache.iter().next().expect("entry should exist"); + assert!(entry.has_base, "has_base should be true after inserting index 0"); + } + + #[test] + fn test_raw_entry_has_base_not_set_when_only_non_zero_index_inserted() { + let factory = TestFlashBlockFactory::new(); + let fb1 = factory.builder().index(1).block_number(100).build(); + let mut cache = TestRawCache::new(); + + cache.handle_flashblock(fb1).expect("fb1 insert"); + let entry = cache.cache.iter().next().expect("entry should exist"); + assert!(!entry.has_base, "has_base should be false when only index 1 inserted"); + } + + #[test] + fn test_raw_flashblocks_cache_handle_flashblock_inserts_via_arc_rwlock() { + let factory = TestFlashBlockFactory::new(); + let fb0 = factory.flashblock_at(0).build(); + let mut cache = RawFlashblocksCache::::new(); + + let result = cache.handle_flashblock(fb0); + assert!(result.is_ok(), "handle_flashblock via Arc wrapper should succeed"); + } + + #[test] + fn test_raw_entry_get_best_revision_with_only_index_one_no_base() { + let factory = TestFlashBlockFactory::new(); + let fb1 = factory.builder().index(1).block_number(100).build(); + + let mut cache = TestRawCache::new(); + cache.handle_flashblock(fb1).expect("fb1 insert"); + let entry = cache.cache.iter().next().expect("entry should exist"); + let best = entry.get_best_revision(); + // Assert: no base → None, even though index 1 exists + assert!(best.is_none(), "no base means get_best_revision must return None"); + } + + #[test] + fn test_raw_entry_get_best_revision_gap_immediately_after_base() { + // Arrange: only index 0 and index 2, no index 1 + let factory = TestFlashBlockFactory::new(); + let fb0 = factory.flashblock_at(0).build(); + let payload_id = fb0.payload_id; + let block_number = fb0.metadata.block_number; + let fb2 = + factory.builder().index(2).block_number(block_number).payload_id(payload_id).build(); + let mut cache = TestRawCache::new(); + + cache.handle_flashblock(fb0).expect("fb0"); + cache.handle_flashblock(fb2).expect("fb2"); + let entry = cache.cache.iter().next().expect("entry should exist"); + let best = entry.get_best_revision(); + // Assert: gap immediately after base (index 1 missing) → best revision is 0 + assert_eq!(best, Some(0), "gap at index 1 means best revision stays at 0"); + } + + // --- can_accept edge cases --- + + #[test] + fn test_raw_entry_can_accept_rejects_mismatched_payload_id_with_same_block_number() { + // Arrange: insert fb with payload_id A at block 100 + let factory = TestFlashBlockFactory::new(); + let fb0 = factory.flashblock_at(0).build(); + let different_payload_id = alloy_rpc_types_engine::PayloadId::new([ + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + ]); + let mut cache = TestRawCache::new(); + + cache.handle_flashblock(fb0.clone()).expect("fb0 insert"); + let fb_diff = factory + .builder() + .index(0) + .block_number(fb0.metadata.block_number) + .payload_id(different_payload_id) + .build(); + let result = cache.handle_flashblock(fb_diff); + // Assert: new entry created (no error), but we now have 2 entries + assert!(result.is_ok(), "different payload_id with same block creates new entry"); + assert_eq!( + cache.cache.len(), + 2, + "different payload_id should produce a second cache entry" + ); + } + + #[test] + fn test_raw_cache_accumulates_flashblocks_into_single_entry_for_same_payload_id() { + let factory = TestFlashBlockFactory::new(); + let fb0 = factory.flashblock_at(0).build(); + let fb1 = factory.flashblock_after(&fb0).build(); + let fb2 = factory.flashblock_after(&fb1).build(); + let fb3 = factory.flashblock_after(&fb2).build(); + let mut cache = TestRawCache::new(); + + cache.handle_flashblock(fb0).expect("fb0"); + cache.handle_flashblock(fb1).expect("fb1"); + cache.handle_flashblock(fb2).expect("fb2"); + cache.handle_flashblock(fb3).expect("fb3"); + // Assert: all four go into a single entry (same payload_id) + assert_eq!( + cache.cache.len(), + 1, + "all flashblocks with the same payload_id should accumulate into one entry" + ); + let entry = cache.cache.iter().next().expect("entry should exist"); + assert_eq!(entry.payloads.len(), 4, "entry should contain 4 payloads"); + } + + #[test] + fn test_raw_entry_transactions_returns_empty_vec_on_empty_flashblock() { + let factory = TestFlashBlockFactory::new(); + let fb0 = factory.flashblock_at(0).build(); + let mut cache = TestRawCache::new(); + + cache.handle_flashblock(fb0).expect("fb0 insert"); + let entry = cache.cache.iter().next().expect("entry should exist"); + let txs = entry.transactions(); + assert!(txs.is_empty(), "flashblock with no txs should return empty transactions vec"); + } + + #[test] + fn test_raw_entry_tx_hashes_consistent_with_transaction_count() { + let factory = TestFlashBlockFactory::new(); + let fb0 = factory.flashblock_at(0).build(); + let mut cache = TestRawCache::new(); + + cache.handle_flashblock(fb0).expect("fb0 insert"); + let entry = cache.cache.iter().next().expect("entry should exist"); + assert_eq!( + entry.tx_hashes().len(), + entry.transaction_count(), + "tx_hashes length should match transaction_count" + ); + } + + #[test] + fn test_flashblock_serde_roundtrip() { + let raw = r#"{ + "diff": { + "block_hash": "0x2d902e3fcb5bd57e0bf878cbbda1386e7fb8968d518912d58678a35e58261c46", + "gas_used": "0x2907796", + "logs_bloom": "0x5c21065292452cfcd5175abfee20e796773da578307356043ba4f62692aca01204e8908f97ab9df43f1e9c57f586b1c9a7df8b66ffa7746dfeeb538617fea5eb75ad87f8b6653f597d86814dc5ad6de404e5a48aeffcc4b1e170c2bdbc7a334936c66166ba0faa6517597b676ef65c588342756f280f7d610aa3ed35c5d877449bfacbdb9b40d98c457f974ab264ec40e4edd6e9fab4c0cb794bf75f10ea20dab75a1f9fd1c441d4c365d1476841e8593f1d1b9a1c52919a0fcf9fc5eef2ef82fe80971a72d1cde1cb195db4806058a229e88acfddfe1a1308adb6f69afa3aaf67f4bd49e93e9f9532ea30bd891a8ff08de61fb645bec678db816950b47fcef0", + "receipts_root": "0x2c4203e9aa87258627bf23ab4d5f9d92da30285ea11dc0b3e140a5a8d4b63e26", + "state_root": "0x0000000000000000000000000000000000000000000000000000000000000000", + "transactions": [ + "0x02f8c2822105830b0c58840b677c0f840c93fb5a834c4b4094d599955d17a1378651e76557ffc406c71300fcb080b851020026000100271000c8e9d514f85b57b70de033e841d788ab4df1acd691802acc26dcd13fb9e38fa8e10001004e2000c8e9d55bd42770e29cb76904377ffdb22737fc9f5eb36fde875fcbfa687b1c3023c080a07e8486ab3db9f07588a3f37bd8ffb9b349ba9bb738a2500d78a4583e1e54a6f9a068d0b3c729a6777c81dd49bd0c2dc3a079f0ceed4e778fbfe79176e8b70d68d8", + "0xf90fae820248840158a3c58307291a94bbbfd134e9b44bfb5123898ba36b01de7ab93d9880b90f443087505600000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000e0000000000000000000000000000000000000000000000000000000000000012000000000000000000000000001c2c79343de52f99538cd2cbbd67ba0813f403000000000000000000000000001c2c79343de52f99538cd2cbbd67ba0813f40300000000000000000000000000000000000000000000000000000000000000001000000000000000000000000833589fcd6edb6e08f4c7c32d4f71b54bda0291300000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000004b2ee6f00000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000018000000000000000000000000000000000000000000000000000000000000003600000000000000000000000000000000000000000000000000000000000000c00000000000000000000000000833589fcd6edb6e08f4c7c32d4f71b54bda029130000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000044095ea7b30000000000000000000000000000000000001ff3684f28c67538d4d072c227340000000000000000000000000000000000000000000000000000000004b2ee6f00000000000000000000000000000000000000000000000000000000000000000000000000000000f5042e6ffac5a625d4e7848e0b01373d8eb9e22200000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000001243b2253c8000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000e00000000000000000000000000000000000000000000000000000000000000001000000000000000000000000833589fcd6edb6e08f4c7c32d4f71b54bda029130000000000000000000000000000000000000000000000000000000000000001000000000000000000000000f70da97812cb96acdf810712aa562db8dfa3dbef000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000133f4000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001ff3684f28c67538d4d072c2273400000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000007e42213bc0b000000000000000000000000ea758cac6115309b325c582fd0782d79e3502177000000000000000000000000833589fcd6edb6e08f4c7c32d4f71b54bda029130000000000000000000000000000000000000000000000000000000004b1ba7b000000000000000000000000ea758cac6115309b325c582fd0782d79e350217700000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000007041fff991f000000000000000000000000f5042e6ffac5a625d4e7848e0b01373d8eb9e222000000000000000000000000d9aaec86b65d86f6a7b5b1b0c42ffa531710b6ca0000000000000000000000000000000000000000000000000000000004b06d9200000000000000000000000000000000000000000000000000000000000000a0d311e79cd2099f6f1f0607040000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000000000000000000000000000000000000600000000000000000000000000000000000000000000000000000000000000180000000000000000000000000000000000000000000000000000000000000058000000000000000000000000000000000000000000000000000000000000000e4c1fb425e000000000000000000000000ea758cac6115309b325c582fd0782d79e3502177000000000000000000000000833589fcd6edb6e08f4c7c32d4f71b54bda029130000000000000000000000000000000000000000000000000000000004b1ba7b00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000069073bb900000000000000000000000000000000000000000000000000000000000000c000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003c438c9c147000000000000000000000000833589fcd6edb6e08f4c7c32d4f71b54bda029130000000000000000000000000000000000000000000000000000000000002710000000000000000000000000ba12222222228d8ba445958a75a0704d566bf2c800000000000000000000000000000000000000000000000000000000000001c400000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000002e4945bcec9000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001200000000000000000000000000000000000000000000000000000000000000220000000000000000000000000ea758cac6115309b325c582fd0782d79e35021770000000000000000000000000000000000000000000000000000000000000000000000000000000000000000ea758cac6115309b325c582fd0782d79e3502177000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002800000000000000000000000000000000000000000000000000000000069073bb9000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000208f360baf899845441eccdc46525e26bb8860752a0002000000000000000001cd000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000004b1ba7b00000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002000000000000000000000000833589fcd6edb6e08f4c7c32d4f71b54bda02913000000000000000000000000d9aaec86b65d86f6a7b5b1b0c42ffa531710b6ca00000000000000000000000000000000000000000000000000000000000000027fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008434ee90ca000000000000000000000000f5c4f3dc02c3fb9279495a8fef7b0741da956157000000000000000000000000d9aaec86b65d86f6a7b5b1b0c42ffa531710b6ca0000000000000000000000000000000000000000000000000000000004b1a7880000000000000000000000000000000000000000000000000000000000002710000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000f5042e6ffac5a625d4e7848e0b01373d8eb9e22200000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000001243b2253c8000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000e00000000000000000000000000000000000000000000000000000000000000001000000000000000000000000d9aaec86b65d86f6a7b5b1b0c42ffa531710b6ca000000000000000000000000000000000000000000000000000000000000000100000000000000000000000001c2c79343de52f99538cd2cbbd67ba0813f403000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002887696e8edbbcbd7306955512ff6f2d8426403eef4762157da3e9c5a89d78f682422da0c8d8b1aa1c9bfd1fe1e4a10c6123caa2fe582294aa5798c54546faa4c09590a9a012a1c78fca9cfefd281c1e44682de3c4420299da5cf2ae498f67d7de7dcf166c", + "0x02f8f582210582a649831db02984026c1a34833d090094f2cb4e685946beecbc9ce5f318b68edc583bcfa080b88600000000000069073af31c4289d066d04f33681f6686155c8243dff963557765630a39bdd8c54e6b7dbe5d4b689e9d536608db03163882cf005f7b5813e41d2fdec75161c8470a410c4c9201000202b6e39c63c7e4ebc01d51f845dfc9cff3f5adf9ef2710000000000103cd1f9777571493aeacb7eae45cd30a226d3e612d4e200000000000c080a088fd1a2b2e5891109afc3845b2c8b0ca76ea8306190dcb80a703a2451f7bab25a0718ae373e36c8ddb2b934ca936ed824db22c0625cfea29be3d408ff41787fc8c", + "0x02f9030b822105830536f9830f58ab84025c6b93833d090094c90d989d809e26b2d95fb72eb3288fef72af8c2f80b9029a00000000000069073af31c3d4d0646e102b6f958428cd8ed562efa6efb234f629b5f6ca52a15fd2e33aea76eb64fb04cae81b3e5b769dbdc681dcfd4b7a802a2cacdf1ccb65276a722c67607000202b6e39c63c7e4ebc01d51f845dfc9cff3f5adf9ef2710000000000103cd1f9777571493aeacb7eae45cd30a226d3e612d4e200000000000010206777762d3eb91810b15526c2c9102864d722ef7a9ed24e77271c1dcbf0fdcba68138800000000010698c8f03094a9e65ccedc14c40130e4a5dd0ce14fb12ea58cbeac11f662b458b9271000000000000003045a9ad2bb92b0b3e5c571fdd5125114e04e02be1a0bb80000000001036e55486ea6b8691ba58224f3cae35505add86c372710000000000003681d6e4b0b020656ca04956ddaf76add7ef022f60dac00000000010003028be0fcdd7cf0b53b7b82b8f6ea8586d07c53359f2710000000000006c30e25679d5c77b257ac3a61ad08603b11e7afe77ac9222a5386c27d08b6b6c3ea6000000000010696d4b53a38337a5733179751781178a2613306063c511b78cd02684739288c0a01f400000000000002020d028b2d7a29d2e57efc6405a1dce1437180e3ce27100000000001068a71465e76d736564b0c90f5cf3d0d7b69c461c36f69250ae27dbead147cc8f80bb80000000000000206354def8b7e6b2ee04bf85c00f5e79f173d0b76d5017bab3a90c7ba62e1722699000000000000010245f3ad9e63f629be6e278cc4cf34d3b0a79a4a0b27100000000000010404b154dbcd3c75580382c2353082df4390613d93c627120000000001011500cc7d9c2b460720a48cc7444d7e7dfe43f6050bb80a03000000015c8dec5f0eedf1f8934815ef8fb8cb8198eac6520bb80a030000010286f3dd3b4d08de718d7909b0fdc16f4cbdf94ef527100000000000c001a0d4c12f6433ff6ea0573633364c030d8b46ed5764494f80eb434f27060c39f315a034df82c4ac185a666280d578992feee0c05fc75d93e3e2286726c85fba1bb0a0", + "0x02f8f68221058305c7b3830f4ef58401a5485d832dc6c094f2cb4e685946beecbc9ce5f318b68edc583bcfa080b88600000000000069073af31b777ac6b2082fc399fde92a814114b7896ca0b0503106910ea099d5e32c93bfc0013ed2850534c3f8583ab7276414416c0d15ac021126f6cb6ca1ed091ddc01eb01000202b6e39c63c7e4ebc01d51f845dfc9cff3f5adf9ef2710000000000103cd1f9777571493aeacb7eae45cd30a226d3e612d4e200000000000c080a09694b95dc893bed698ede415c188db3530ccc98a01d79bb9f11d783de7dddde9a0275b0165ab21ea0e6f721c624aa2270a3f98276ca0c95381d90e3f9d434b4881", + "0x02f8f682210583034573830f4ef58401a5485d832dc6c094f2cb4e685946beecbc9ce5f318b68edc583bcfa080b88600000000000069073af31c970da8f2adb8bafe6d254ec4428f8342508e169f75e8450f6ff8488813dfa638395e16787966f01731fddffd0e7352cde07fd24bba283bd27f1828fb2a0c700701000202b6e39c63c7e4ebc01d51f845dfc9cff3f5adf9ef2710000000000103cd1f9777571493aeacb7eae45cd30a226d3e612d4e200000000000c080a00181afe4bedab67692a9c1ff30a89fde6b3d3c8407a47a2777efcd6bdc0c39d2a022d6a4219e72eebdbc5d31ae998243ccec1b192c5c7c586308ccddb4838cd631", + "0x02f8c1822105830b0cfd830f4ed084013bce1b834c4b4094d599955d17a1378651e76557ffc406c71300fcb080b851020026000100271000c8e9d514f85b57b70de033e841d788ab4df1acd691802acc26dcd13fb9e38fa8e10001004e2000c8e9d55bd42770e29cb76904377ffdb22737fc9f5eb36fde875fcbfa687b1c3023c001a0d87c4e16986db55b8846bccfe7bca824b75216e72d8f92369c46681800285cb2a00ec53251be3c2a0d19884747d123ddb0ada3c0a917b21882e297e95c2294d52a", + "0x02f901d58221058306361d830f4240840163efbc8301546194833589fcd6edb6e08f4c7c32d4f71b54bda0291380b90164cf092995000000000000000000000000d723d9f752c19faf88a5fd2111a38d0cc5d395b00000000000000000000000000b55712de2ce8f93be30d53c03d48ea275cd14d000000000000000000000000000000000000000000000000000000000000003e8000000000000000000000000000000000000000000000000000000006907385e0000000000000000000000000000000000000000000000000000000069073be2bef9866b70d0bb74d8763996eb5967b1b24cd48f7801f94ad80cb49431df6b1d00000000000000000000000000000000000000000000000000000000000000e000000000000000000000000000000000000000000000000000000000000000417c9c2382c6c3f029aa3dcbf1df075366fae7bc9fba7f3729713e0bf4d518951f5340350208db96af23686d9985ce552e3588244456a23ca99ecbcae779ea11e71c00000000000000000000000000000000000000000000000000000000000000c080a0b1090c8c67ca9a49ba3591c72c8851f187bbfc39b1920dff2f6c0157ed1ada39a0265b7f704f4c1b5c2c5ca57f1a4040e1e48878c9ad5f2cca9c4e6669d12989f2", + "0x02f8c1822105830b0c98830f424084013bc18b834c4b4094d599955d17a1378651e76557ffc406c71300fcb080b851020026000100271000c8e9d514f85b57b70de033e841d788ab4df1acd691802acc26dcd13fb9e38fa8e10001004e2000c8e9d55bd42770e29cb76904377ffdb22737fc9f5eb36fde875fcbfa687b1c3023c001a080a96d18ae46b58d9a470846a05b394ab4a49a2e379de1941205684e1ac291f9a01e6d4d2c6bab5bf8b89f1df2d6beb85d9f1b3f3be73ca2b72e4ad2d9da0d12d2", + "0x02f901d48221058231e0830f4240840163efbc8301544d94833589fcd6edb6e08f4c7c32d4f71b54bda0291380b90164cf0929950000000000000000000000001de8dbc2409c4bbf14445b0d404bb894f0c6cff70000000000000000000000008d8fa42584a727488eeb0e29405ad794a105bb9b0000000000000000000000000000000000000000000000000000000000002710000000000000000000000000000000000000000000000000000000006907385d0000000000000000000000000000000000000000000000000000000069073af16b129c414484e011621c44e0b32451fdbd69e63ef4919f427dde08c16cb199b100000000000000000000000000000000000000000000000000000000000000e00000000000000000000000000000000000000000000000000000000000000041ae0a4b618c30f0e5d92d7fe99bb435413b2201711427699fd285f69666396cee76199d4e901cfb298612cb3b8ad06178cefb4136a8bc1be07c01b5fea80e5ec11b00000000000000000000000000000000000000000000000000000000000000c080a0af315068084aae367f00263dbd872908bbb9ceaefd6b792fc48dd357e6bdf8afa01e7f0e5913570394b9648939ef71fc5ac34fe320a2757ec388316731a335e69f", + "0x02f9022f82210583052d0b830f423f84025c5527833d090094c90d989d809e26b2d95fb72eb3288fef72af8c2f80b901be00000000000069073af31cf0f932cecc8c4c6ffffa554a63e8fba251434483ed3903966d2ba5a70121618a1c45bd9ee158192ab8d7e12ce0f447f2848a48aedaa89e0efa8637bb931745de05000202b6e39c63c7e4ebc01d51f845dfc9cff3f5adf9ef2710000000000103cd1f9777571493aeacb7eae45cd30a226d3e612d4e2000000000000003045a9ad2bb92b0b3e5c571fdd5125114e04e02be1a0bb80000000001036e55486ea6b8691ba58224f3cae35505add86c372710000000000003681d6e4b0b020656ca04956ddaf76add7ef022f60dac0000000001010206777762d3eb91810b15526c2c9102864d722ef7a9ed24e77271c1dcbf0fdcba68138800000000010698c8f03094a9e65ccedc14c40130e4a5dd0ce14fb12ea58cbeac11f662b458b9271000000000000002005554419ccd0293d9383901f461c7c3e0c66e925f0bb80000000001028eb9437532fac8d6a7870f3f887b7978d20355fc271000000000000003035d28f920c9d23100e4a38b2ba2d8ae617c3b261501f4000000000102bc51db8aec659027ae0b0e468c0735418161a7800bb8000000000003dbc6998296caa1652a810dc8d3baf4a8294330f100500000000000c080a040000b130b1759df897a9573691a3d1cafacc6d95d0db1826f275afc30e2ff63a0400a7514f8d5383970c4412205ec8e9c6ca06acea504acabd2d3c36e9cb5003d" + ], + "withdrawals": [], + "withdrawals_root": "0x81864c23f426ad807d66c9fdde33213e1fdbac06c1b751d279901d1ce13670ac" + }, + "index": 10, + "metadata": { + "block_number": 37646058, + "new_account_balances": { + "0x000000000022d473030f116ddee9f6b43ac78ba3": "0x0", + "0x0000000071727de22e5e9d8baf0edac6f37da032": "0x23281e39594556899", + "0x0000f90827f1c53a10cb7a02335b175320002935": "0x0", + "0x000f3df6d732807ef1319fb7b8bb8522d0beac02": "0x0" + }, + "receipts": { + "0x1a766690fd6d0febffc488f12fbd7385c43fbe1e07113a1316f22f176355297e": { + "Legacy": { + "cumulativeGasUsed": "0x2868d76", + "logs": [ + { + "address": "0x833589fcd6edb6e08f4c7c32d4f71b54bda02913", + "data": "0x0000000000000000000000000000000000000000000000000000000004b2ee6f", + "topics": [ + "0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef", + "0x00000000000000000000000001c2c79343de52f99538cd2cbbd67ba0813f4030", + "0x000000000000000000000000f5042e6ffac5a625d4e7848e0b01373d8eb9e222" + ] + }, + { + "address": "0x833589fcd6edb6e08f4c7c32d4f71b54bda02913", + "data": "0x0000000000000000000000000000000000000000000000000000000004b2ee6f", + "topics": [ + "0x8c5be1e5ebec7d5bd14f71427d1e84f3dd0314c0f7b2291e5b200ac8c7c3b925", + "0x000000000000000000000000f5042e6ffac5a625d4e7848e0b01373d8eb9e222", + "0x0000000000000000000000000000000000001ff3684f28c67538d4d072c22734" + ] + }, + { + "address": "0xf5042e6ffac5a625d4e7848e0b01373d8eb9e222", + "data": "0x000000000000000000000000833589fcd6edb6e08f4c7c32d4f71b54bda02913000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000044095ea7b30000000000000000000000000000000000001ff3684f28c67538d4d072c227340000000000000000000000000000000000000000000000000000000004b2ee6f00000000000000000000000000000000000000000000000000000000", + "topics": [ + "0x93485dcd31a905e3ffd7b012abe3438fa8fa77f98ddc9f50e879d3fa7ccdc324" + ] + }, + { + "address": "0x833589fcd6edb6e08f4c7c32d4f71b54bda02913", + "data": "0x00000000000000000000000000000000000000000000000000000000000133f4", + "topics": [ + "0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef", + "0x000000000000000000000000f5042e6ffac5a625d4e7848e0b01373d8eb9e222", + "0x000000000000000000000000f70da97812cb96acdf810712aa562db8dfa3dbef" + ] + }, + { + "address": "0xf5042e6ffac5a625d4e7848e0b01373d8eb9e222", + "data": "0x000000000000000000000000f5042e6ffac5a625d4e7848e0b01373d8eb9e2220000000000000000000000000000000000000000000000000000000000000060000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001243b2253c8000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000e00000000000000000000000000000000000000000000000000000000000000001000000000000000000000000833589fcd6edb6e08f4c7c32d4f71b54bda029130000000000000000000000000000000000000000000000000000000000000001000000000000000000000000f70da97812cb96acdf810712aa562db8dfa3dbef000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000133f400000000000000000000000000000000000000000000000000000000", + "topics": [ + "0x93485dcd31a905e3ffd7b012abe3438fa8fa77f98ddc9f50e879d3fa7ccdc324" + ] + }, + { + "address": "0x833589fcd6edb6e08f4c7c32d4f71b54bda02913", + "data": "0x0000000000000000000000000000000000000000000000000000000004b1ba7b", + "topics": [ + "0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef", + "0x000000000000000000000000f5042e6ffac5a625d4e7848e0b01373d8eb9e222", + "0x000000000000000000000000ea758cac6115309b325c582fd0782d79e3502177" + ] + }, + { + "address": "0x8f360baf899845441eccdc46525e26bb8860752a", + "data": "0x00000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000001957cc57b7a9959c0000000000000000000000000000000000000000000000001957cc57b7a9959800000000000000000000000000000000000000000000000444e308096a22c339000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000092458cc3a866f04600000000000000000000000000000000000000000000000025f3e27916e84b59000", + "topics": [ + "0x4e1d56f7310a8c32b2267f756b19ba65019b4890068ce114a25009abe54de5ba" + ] + }, + { + "address": "0xba12222222228d8ba445958a75a0704d566bf2c8", + "data": "0x0000000000000000000000000000000000000000000000000000000004b1ba7b0000000000000000000000000000000000000000000000000000000004b1a44c", + "topics": [ + "0x2170c741c41531aec20e7c107c24eecfdd15e69c9bb0a8dd37b1840b9e0b207b", + "0x8f360baf899845441eccdc46525e26bb8860752a0002000000000000000001cd", + "0x000000000000000000000000833589fcd6edb6e08f4c7c32d4f71b54bda02913", + "0x000000000000000000000000d9aaec86b65d86f6a7b5b1b0c42ffa531710b6ca" + ] + }, + { + "address": "0x833589fcd6edb6e08f4c7c32d4f71b54bda02913", + "data": "0x0000000000000000000000000000000000000000000000000000000004b1ba7b", + "topics": [ + "0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef", + "0x000000000000000000000000ea758cac6115309b325c582fd0782d79e3502177", + "0x000000000000000000000000ba12222222228d8ba445958a75a0704d566bf2c8" + ] + }, + { + "address": "0xd9aaec86b65d86f6a7b5b1b0c42ffa531710b6ca", + "data": "0x0000000000000000000000000000000000000000000000000000000004b1a44c", + "topics": [ + "0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef", + "0x000000000000000000000000ba12222222228d8ba445958a75a0704d566bf2c8", + "0x000000000000000000000000ea758cac6115309b325c582fd0782d79e3502177" + ] + }, + { + "address": "0xd9aaec86b65d86f6a7b5b1b0c42ffa531710b6ca", + "data": "0x0000000000000000000000000000000000000000000000000000000004b1a44c", + "topics": [ + "0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef", + "0x000000000000000000000000ea758cac6115309b325c582fd0782d79e3502177", + "0x000000000000000000000000f5042e6ffac5a625d4e7848e0b01373d8eb9e222" + ] + }, + { + "address": "0xf5042e6ffac5a625d4e7848e0b01373d8eb9e222", + "data": "0x0000000000000000000000000000000000001ff3684f28c67538d4d072c227340000000000000000000000000000000000000000000000000000000000000060000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000007e42213bc0b000000000000000000000000ea758cac6115309b325c582fd0782d79e3502177000000000000000000000000833589fcd6edb6e08f4c7c32d4f71b54bda029130000000000000000000000000000000000000000000000000000000004b1ba7b000000000000000000000000ea758cac6115309b325c582fd0782d79e350217700000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000007041fff991f000000000000000000000000f5042e6ffac5a625d4e7848e0b01373d8eb9e222000000000000000000000000d9aaec86b65d86f6a7b5b1b0c42ffa531710b6ca0000000000000000000000000000000000000000000000000000000004b06d9200000000000000000000000000000000000000000000000000000000000000a0d311e79cd2099f6f1f0607040000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000000000000000000000000000000000000600000000000000000000000000000000000000000000000000000000000000180000000000000000000000000000000000000000000000000000000000000058000000000000000000000000000000000000000000000000000000000000000e4c1fb425e000000000000000000000000ea758cac6115309b325c582fd0782d79e3502177000000000000000000000000833589fcd6edb6e08f4c7c32d4f71b54bda029130000000000000000000000000000000000000000000000000000000004b1ba7b00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000069073bb900000000000000000000000000000000000000000000000000000000000000c000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003c438c9c147000000000000000000000000833589fcd6edb6e08f4c7c32d4f71b54bda029130000000000000000000000000000000000000000000000000000000000002710000000000000000000000000ba12222222228d8ba445958a75a0704d566bf2c800000000000000000000000000000000000000000000000000000000000001c400000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000002e4945bcec9000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001200000000000000000000000000000000000000000000000000000000000000220000000000000000000000000ea758cac6115309b325c582fd0782d79e35021770000000000000000000000000000000000000000000000000000000000000000000000000000000000000000ea758cac6115309b325c582fd0782d79e3502177000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002800000000000000000000000000000000000000000000000000000000069073bb9000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000208f360baf899845441eccdc46525e26bb8860752a0002000000000000000001cd000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000004b1ba7b00000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002000000000000000000000000833589fcd6edb6e08f4c7c32d4f71b54bda02913000000000000000000000000d9aaec86b65d86f6a7b5b1b0c42ffa531710b6ca00000000000000000000000000000000000000000000000000000000000000027fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008434ee90ca000000000000000000000000f5c4f3dc02c3fb9279495a8fef7b0741da956157000000000000000000000000d9aaec86b65d86f6a7b5b1b0c42ffa531710b6ca0000000000000000000000000000000000000000000000000000000004b1a7880000000000000000000000000000000000000000000000000000000000002710000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "topics": [ + "0x93485dcd31a905e3ffd7b012abe3438fa8fa77f98ddc9f50e879d3fa7ccdc324" + ] + }, + { + "address": "0xd9aaec86b65d86f6a7b5b1b0c42ffa531710b6ca", + "data": "0x0000000000000000000000000000000000000000000000000000000004b1a44c", + "topics": [ + "0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef", + "0x000000000000000000000000f5042e6ffac5a625d4e7848e0b01373d8eb9e222", + "0x00000000000000000000000001c2c79343de52f99538cd2cbbd67ba0813f4030" + ] + }, + { + "address": "0xf5042e6ffac5a625d4e7848e0b01373d8eb9e222", + "data": "0x000000000000000000000000f5042e6ffac5a625d4e7848e0b01373d8eb9e2220000000000000000000000000000000000000000000000000000000000000060000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001243b2253c8000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000e00000000000000000000000000000000000000000000000000000000000000001000000000000000000000000d9aaec86b65d86f6a7b5b1b0c42ffa531710b6ca000000000000000000000000000000000000000000000000000000000000000100000000000000000000000001c2c79343de52f99538cd2cbbd67ba0813f40300000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "topics": [ + "0x93485dcd31a905e3ffd7b012abe3438fa8fa77f98ddc9f50e879d3fa7ccdc324" + ] + } + ], + "status": "0x1" + } + }, + "0x2cd6b4825b5ee40b703c947e15630336dceda97825b70412da54ccc27f484496": { + "Eip1559": { + "cumulativeGasUsed": "0x28cca69", + "logs": [ + { + "address": "0x833589fcd6edb6e08f4c7c32d4f71b54bda02913", + "data": "0x", + "topics": [ + "0x98de503528ee59b575ef0c0a2576a82497bfc029a5685b209e9ec333479b10a5", + "0x000000000000000000000000d723d9f752c19faf88a5fd2111a38d0cc5d395b0", + "0xbef9866b70d0bb74d8763996eb5967b1b24cd48f7801f94ad80cb49431df6b1d" + ] + }, + { + "address": "0x833589fcd6edb6e08f4c7c32d4f71b54bda02913", + "data": "0x00000000000000000000000000000000000000000000000000000000000003e8", + "topics": [ + "0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef", + "0x000000000000000000000000d723d9f752c19faf88a5fd2111a38d0cc5d395b0", + "0x0000000000000000000000000b55712de2ce8f93be30d53c03d48ea275cd14d0" + ] + } + ], + "status": "0x1" + } + } + } + }, + "payload_id": "0x0316ecb1aa1671b5" +}"#; + + let flashblock: OpFlashblockPayload = serde_json::from_str(raw).expect("deserialize"); + let serialized = serde_json::to_string(&flashblock).expect("serialize"); + let roundtrip: OpFlashblockPayload = serde_json::from_str(&serialized).expect("roundtrip"); + + assert_eq!(flashblock, roundtrip); + } +} diff --git a/crates/flashblocks/src/cache/utils.rs b/crates/flashblocks/src/cache/utils.rs index 6cd5f8c7..32936a90 100644 --- a/crates/flashblocks/src/cache/utils.rs +++ b/crates/flashblocks/src/cache/utils.rs @@ -4,3 +4,46 @@ use reth_rpc_eth_types::block::BlockAndReceipts; pub(crate) fn block_from_bar(bar: &BlockAndReceipts) -> BlockTy { BlockTy::::new(bar.block.header().clone(), bar.block.body().clone()) } + +#[cfg(test)] +mod tests { + use super::*; + use alloy_consensus::{BlockHeader, Header}; + use alloy_primitives::B256; + use reth_optimism_primitives::{OpBlock, OpPrimitives}; + use reth_primitives_traits::{RecoveredBlock, SealedBlock, SealedHeader}; + use reth_rpc_eth_types::block::BlockAndReceipts; + use std::sync::Arc; + + /// Builds a minimal `BlockAndReceipts` for testing. + fn make_block_and_receipts( + block_number: u64, + parent_hash: B256, + ) -> BlockAndReceipts { + let header = Header { number: block_number, parent_hash, ..Default::default() }; + let sealed_header = SealedHeader::seal_slow(header); + let block = OpBlock::new(sealed_header.unseal(), Default::default()); + let sealed_block = SealedBlock::seal_slow(block); + let recovered_block = RecoveredBlock::new_sealed(sealed_block, vec![]); + BlockAndReceipts { block: Arc::new(recovered_block), receipts: Arc::new(vec![]) } + } + + #[test] + fn test_block_from_bar_returns_block_with_correct_number() { + let bar = make_block_and_receipts(42, B256::ZERO); + let block = block_from_bar::(&bar); + assert_eq!(block.header().number(), 42, "block_from_bar should preserve the block number"); + } + + #[test] + fn test_block_from_bar_returns_block_with_correct_parent_hash() { + let parent = B256::repeat_byte(0xBE); + let bar = make_block_and_receipts(10, parent); + let block = block_from_bar::(&bar); + assert_eq!( + block.header().parent_hash(), + parent, + "block_from_bar should preserve the parent hash" + ); + } +} From c18a37da1e22162245c8124e52da8ff6cf4efbce Mon Sep 17 00:00:00 2001 From: Niven Date: Mon, 16 Mar 2026 10:52:57 +0800 Subject: [PATCH 27/76] chore: fix tests MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude Opus 4.6 --- crates/flashblocks/src/cache/confirm.rs | 167 ++++------------- crates/flashblocks/src/cache/pending.rs | 55 +----- crates/flashblocks/src/test_utils.rs | 230 ++++++++++-------------- 3 files changed, 141 insertions(+), 311 deletions(-) diff --git a/crates/flashblocks/src/cache/confirm.rs b/crates/flashblocks/src/cache/confirm.rs index 1045088d..3acdfefb 100644 --- a/crates/flashblocks/src/cache/confirm.rs +++ b/crates/flashblocks/src/cache/confirm.rs @@ -231,110 +231,19 @@ impl ConfirmCache { #[cfg(test)] mod tests { use super::*; - use alloy_consensus::{BlockHeader, Header, Receipt, TxEip7702}; - use alloy_primitives::{Address, Bytes, PrimitiveSignature, B256, U256}; - use op_alloy_consensus::OpTypedTransaction; - use reth_chain_state::{ComputedTrieData, ExecutedBlock}; - use reth_execution_types::{BlockExecutionOutput, BlockExecutionResult}; - use reth_optimism_primitives::{ - OpBlock, OpBlockBody, OpPrimitives, OpReceipt, OpTransactionSigned, - }; - use reth_primitives_traits::{RecoveredBlock, SealedBlock, SealedHeader}; - use std::sync::Arc; - - type TestConfirmCache = ConfirmCache; - - fn make_executed_block(block_number: u64, parent_hash: B256) -> ExecutedBlock { - let header = Header { number: block_number, parent_hash, ..Default::default() }; - let sealed_header = SealedHeader::seal_slow(header); - let block = OpBlock::new(sealed_header.unseal(), Default::default()); - let sealed_block = SealedBlock::seal_slow(block); - let recovered_block = RecoveredBlock::new_sealed(sealed_block, vec![]); - let execution_output = Arc::new(BlockExecutionOutput { - result: BlockExecutionResult { - receipts: vec![], - requests: Default::default(), - gas_used: 0, - blob_gas_used: 0, - }, - state: Default::default(), - }); - ExecutedBlock::new(Arc::new(recovered_block), execution_output, ComputedTrieData::default()) - } - - fn empty_receipts() -> Arc> { - Arc::new(vec![]) - } - - fn mock_tx(nonce: u64) -> OpTransactionSigned { - let tx = TxEip7702 { - chain_id: 1u64, - nonce, - max_fee_per_gas: 0x28f000fff, - max_priority_fee_per_gas: 0x28f000fff, - gas_limit: 21_000, - to: Address::default(), - value: U256::ZERO, - input: Bytes::new(), - access_list: Default::default(), - authorization_list: Default::default(), - }; - let signature = PrimitiveSignature::new(U256::default(), U256::default(), true); - OpTransactionSigned::new_unhashed(OpTypedTransaction::Eip7702(tx), signature) - } - - fn make_executed_block_with_txs( - block_number: u64, - parent_hash: B256, - nonce_start: u64, - count: usize, - ) -> (ExecutedBlock, Arc>) { - let txs: Vec = - (0..count).map(|i| mock_tx(nonce_start + i as u64)).collect(); - let senders: Vec
= (0..count).map(|_| Address::default()).collect(); - let receipts: Vec = (0..count) - .map(|i| { - OpReceipt::Eip7702(Receipt { - status: true.into(), - cumulative_gas_used: 21_000 * (i as u64 + 1), - logs: vec![], - }) - }) - .collect(); - - let header = Header { number: block_number, parent_hash, ..Default::default() }; - let sealed_header = SealedHeader::seal_slow(header); - let body = OpBlockBody { transactions: txs, ..Default::default() }; - let block = OpBlock::new(sealed_header.unseal(), body); - let sealed_block = SealedBlock::seal_slow(block); - let recovered_block = RecoveredBlock::new_sealed(sealed_block, senders); - let execution_output = Arc::new(BlockExecutionOutput { - result: BlockExecutionResult { - receipts: receipts.clone(), - requests: Default::default(), - gas_used: 21_000 * count as u64, - blob_gas_used: 0, - }, - state: Default::default(), - }); - let executed = ExecutedBlock::new( - Arc::new(recovered_block), - execution_output, - ComputedTrieData::default(), - ); - (executed, Arc::new(receipts)) - } + use crate::test_utils::{empty_receipts, make_executed_block}; + use reth_optimism_primitives::OpPrimitives; #[test] fn test_confirm_cache_new_is_empty() { - let cache = TestConfirmCache::new(); + let cache = ConfirmCache::::new(); assert!(cache.is_empty()); assert_eq!(cache.len(), 0); } #[test] fn test_confirm_cache_insert_single_block_increases_len() { - let mut cache = TestConfirmCache::new(); + let mut cache = ConfirmCache::::new(); let block = make_executed_block(1, B256::ZERO); cache.insert(1, block, empty_receipts()).expect("insert should succeed"); assert_eq!(cache.len(), 1); @@ -343,7 +252,7 @@ mod tests { #[test] fn test_confirm_cache_insert_fails_at_max_capacity() { - let mut cache = TestConfirmCache::new(); + let mut cache = ConfirmCache::::new(); let mut parent = B256::ZERO; for height in 1..=(DEFAULT_CONFIRM_BLOCK_CACHE_SIZE as u64) { let block = make_executed_block(height, parent); @@ -359,7 +268,7 @@ mod tests { #[test] fn test_confirm_cache_get_block_by_number_returns_correct_block() { - let mut cache = TestConfirmCache::new(); + let mut cache = ConfirmCache::::new(); let block = make_executed_block(42, B256::ZERO); cache.insert(42, block, empty_receipts()).expect("insert"); let result = cache.get_block_by_number(42); @@ -369,7 +278,7 @@ mod tests { #[test] fn test_confirm_cache_get_block_by_number_returns_none_for_wrong_number() { - let mut cache = TestConfirmCache::new(); + let mut cache = ConfirmCache::::new(); let block = make_executed_block(42, B256::ZERO); cache.insert(42, block, empty_receipts()).expect("insert"); assert!(cache.get_block_by_number(43).is_none()); @@ -378,7 +287,7 @@ mod tests { #[test] fn test_confirm_cache_get_block_by_hash_returns_correct_block() { - let mut cache = TestConfirmCache::new(); + let mut cache = ConfirmCache::::new(); let block = make_executed_block(42, B256::ZERO); let block_hash = block.recovered_block.hash(); cache.insert(42, block, empty_receipts()).expect("insert"); @@ -389,7 +298,7 @@ mod tests { #[test] fn test_confirm_cache_get_block_by_hash_returns_none_for_unknown_hash() { - let mut cache = TestConfirmCache::new(); + let mut cache = ConfirmCache::::new(); let block = make_executed_block(42, B256::ZERO); cache.insert(42, block, empty_receipts()).expect("insert"); assert!(cache.get_block_by_hash(&B256::repeat_byte(0xFF)).is_none()); @@ -397,7 +306,7 @@ mod tests { #[test] fn test_confirm_cache_number_for_hash_returns_correct_mapping() { - let mut cache = TestConfirmCache::new(); + let mut cache = ConfirmCache::::new(); let block = make_executed_block(10, B256::ZERO); let hash = block.recovered_block.hash(); cache.insert(10, block, empty_receipts()).expect("insert"); @@ -406,7 +315,7 @@ mod tests { #[test] fn test_confirm_cache_hash_for_number_returns_correct_mapping() { - let mut cache = TestConfirmCache::new(); + let mut cache = ConfirmCache::::new(); let block = make_executed_block(10, B256::ZERO); let expected_hash = block.recovered_block.hash(); cache.insert(10, block, empty_receipts()).expect("insert"); @@ -415,7 +324,7 @@ mod tests { #[test] fn test_confirm_cache_clear_removes_all_entries() { - let mut cache = TestConfirmCache::new(); + let mut cache = ConfirmCache::::new(); let block = make_executed_block(1, B256::ZERO); cache.insert(1, block, empty_receipts()).expect("insert"); cache.clear(); @@ -425,7 +334,7 @@ mod tests { #[test] fn test_confirm_cache_flush_up_to_height_removes_entries_at_or_below_height() { - let mut cache = TestConfirmCache::new(); + let mut cache = ConfirmCache::::new(); let mut parent = B256::ZERO; for height in 1..=5 { let block = make_executed_block(height, parent); @@ -442,7 +351,7 @@ mod tests { #[test] fn test_confirm_cache_flush_up_to_height_higher_than_all_removes_all() { - let mut cache = TestConfirmCache::new(); + let mut cache = ConfirmCache::::new(); let mut parent = B256::ZERO; for height in 1..=3 { let block = make_executed_block(height, parent); @@ -456,7 +365,7 @@ mod tests { #[test] fn test_confirm_cache_flush_up_to_height_zero_removes_nothing() { - let mut cache = TestConfirmCache::new(); + let mut cache = ConfirmCache::::new(); let block = make_executed_block(1, B256::ZERO); cache.insert(1, block, empty_receipts()).expect("insert"); let count = cache.flush_up_to_height(0); @@ -466,7 +375,7 @@ mod tests { #[test] fn test_confirm_cache_flush_removes_hash_indices_for_all_flushed_blocks() { - let mut cache = TestConfirmCache::new(); + let mut cache = ConfirmCache::::new(); let mut parent = B256::ZERO; let mut hashes = vec![]; for height in 1..=3 { @@ -484,7 +393,7 @@ mod tests { #[test] fn test_confirm_cache_remove_block_by_number_returns_block_and_cleans_indices() { - let mut cache = TestConfirmCache::new(); + let mut cache = ConfirmCache::::new(); let block = make_executed_block(5, B256::ZERO); let block_hash = block.recovered_block.hash(); cache.insert(5, block, empty_receipts()).expect("insert"); @@ -497,7 +406,7 @@ mod tests { #[test] fn test_confirm_cache_remove_block_by_hash_returns_block_and_cleans_indices() { - let mut cache = TestConfirmCache::new(); + let mut cache = ConfirmCache::::new(); let block = make_executed_block(7, B256::ZERO); let block_hash = block.recovered_block.hash(); cache.insert(7, block, empty_receipts()).expect("insert"); @@ -511,7 +420,7 @@ mod tests { #[test] fn test_confirm_cache_get_executed_blocks_up_to_height_returns_contiguous_blocks_newest_first() { - let mut cache = TestConfirmCache::new(); + let mut cache = ConfirmCache::::new(); let block2 = make_executed_block(2, B256::repeat_byte(0x01)); let block3 = make_executed_block(3, block2.recovered_block.hash()); let block4 = make_executed_block(4, block3.recovered_block.hash()); @@ -527,14 +436,14 @@ mod tests { #[test] fn test_confirm_cache_get_executed_blocks_up_to_height_returns_empty_on_empty_cache() { - let cache = TestConfirmCache::new(); + let cache = ConfirmCache::::new(); let result = cache.get_executed_blocks_up_to_height(5, 1); assert!(result.unwrap().is_empty()); } #[test] fn test_confirm_cache_get_executed_blocks_detects_gap_between_canonical_and_overlay() { - let mut cache = TestConfirmCache::new(); + let mut cache = ConfirmCache::::new(); let block3 = make_executed_block(3, B256::ZERO); cache.insert(3, block3, empty_receipts()).expect("insert 3"); assert!(cache.get_executed_blocks_up_to_height(3, 1).is_err()); @@ -542,7 +451,7 @@ mod tests { #[test] fn test_confirm_cache_get_executed_blocks_detects_non_contiguous_overlay() { - let mut cache = TestConfirmCache::new(); + let mut cache = ConfirmCache::::new(); let block2 = make_executed_block(2, B256::repeat_byte(0x01)); let block4 = make_executed_block(4, B256::repeat_byte(0x03)); cache.insert(2, block2, empty_receipts()).expect("insert 2"); @@ -552,7 +461,7 @@ mod tests { #[test] fn test_confirm_cache_get_executed_blocks_allows_redundant_overlap_with_canonical() { - let mut cache = TestConfirmCache::new(); + let mut cache = ConfirmCache::::new(); let block2 = make_executed_block(2, B256::repeat_byte(0x01)); let block3 = make_executed_block(3, block2.recovered_block.hash()); cache.insert(2, block2, empty_receipts()).expect("insert 2"); @@ -563,7 +472,7 @@ mod tests { #[test] fn test_confirm_cache_get_executed_blocks_single_block_contiguous_with_canonical() { - let mut cache = TestConfirmCache::new(); + let mut cache = ConfirmCache::::new(); let block5 = make_executed_block(5, B256::repeat_byte(0x04)); cache.insert(5, block5, empty_receipts()).expect("insert 5"); let blocks = cache.get_executed_blocks_up_to_height(5, 4).unwrap(); @@ -573,7 +482,7 @@ mod tests { #[test] fn test_confirm_cache_get_executed_blocks_returns_subset_up_to_target() { - let mut cache = TestConfirmCache::new(); + let mut cache = ConfirmCache::::new(); let block2 = make_executed_block(2, B256::repeat_byte(0x01)); let block3 = make_executed_block(3, block2.recovered_block.hash()); let block4 = make_executed_block(4, block3.recovered_block.hash()); @@ -590,7 +499,7 @@ mod tests { #[test] fn test_confirm_cache_insert_same_height_twice_keeps_cache_len_at_one() { - let mut cache = TestConfirmCache::new(); + let mut cache = ConfirmCache::::new(); let block_a = make_executed_block(10, B256::ZERO); let block_b = make_executed_block(10, B256::repeat_byte(0xFF)); cache.insert(10, block_a, empty_receipts()).expect("first insert"); @@ -600,13 +509,13 @@ mod tests { #[test] fn test_confirm_cache_get_tx_info_returns_none_for_unknown_hash() { - let cache = TestConfirmCache::new(); + let cache = ConfirmCache::::new(); assert!(cache.get_tx_info(&B256::repeat_byte(0xAA)).is_none()); } #[test] fn test_confirm_cache_insert_builds_tx_index_correctly() { - let mut cache = TestConfirmCache::new(); + let mut cache = ConfirmCache::::new(); let (block, receipts) = make_executed_block_with_txs(1, B256::ZERO, 0, 3); let block_hash = block.recovered_block.hash(); let tx_hashes: Vec<_> = @@ -624,35 +533,35 @@ mod tests { #[test] fn test_confirm_cache_flush_cleans_tx_index() { - let mut cache = TestConfirmCache::new(); + let mut cache = ConfirmCache::::new(); let (block, receipts) = make_executed_block_with_txs(1, B256::ZERO, 0, 2); let tx_hashes: Vec<_> = block.recovered_block.body().transactions().map(|tx| *tx.tx_hash()).collect(); cache.insert(1, block, receipts).expect("insert"); cache.flush_up_to_height(1); - for tx_hash in &tx_hashes { + for tx_hash in tx_hashes.iter() { assert!(cache.get_tx_info(tx_hash).is_none()); } } #[test] fn test_confirm_cache_remove_block_by_number_cleans_tx_index() { - let mut cache = TestConfirmCache::new(); + let mut cache = ConfirmCache::::new(); let (block, receipts) = make_executed_block_with_txs(5, B256::ZERO, 0, 2); let tx_hashes: Vec<_> = block.recovered_block.body().transactions().map(|tx| *tx.tx_hash()).collect(); cache.insert(5, block, receipts).expect("insert"); cache.remove_block_by_number(5); - for tx_hash in &tx_hashes { + for tx_hash in tx_hashes.iter() { assert!(cache.get_tx_info(tx_hash).is_none()); } } #[test] fn test_confirm_cache_insert_duplicate_height_leaks_stale_hash_index() { - let mut cache = TestConfirmCache::new(); + let mut cache = ConfirmCache::::new(); let block_a = make_executed_block(10, B256::ZERO); let hash_a = block_a.recovered_block.hash(); let block_b = make_executed_block(10, B256::repeat_byte(0xFF)); @@ -673,7 +582,7 @@ mod tests { #[test] fn test_confirm_cache_flush_cleans_tx_index_for_partial_flush() { - let mut cache = TestConfirmCache::new(); + let mut cache = ConfirmCache::::new(); let (block1, receipts1) = make_executed_block_with_txs(1, B256::ZERO, 0, 2); let tx_hashes_1: Vec<_> = block1.recovered_block.body().transactions().map(|tx| *tx.tx_hash()).collect(); @@ -686,24 +595,24 @@ mod tests { cache.insert(2, block2, receipts2).expect("insert 2"); cache.flush_up_to_height(1); - for tx_hash in &tx_hashes_1 { + for tx_hash in tx_hashes_1.iter() { assert!(cache.get_tx_info(tx_hash).is_none(), "block 1 tx should be gone"); } - for tx_hash in &tx_hashes_2 { + for tx_hash in tx_hashes_2.iter() { assert!(cache.get_tx_info(tx_hash).is_some(), "block 2 tx should remain"); } } #[test] fn test_confirm_cache_clear_cleans_tx_index() { - let mut cache = TestConfirmCache::new(); + let mut cache = ConfirmCache::::new(); let (block, receipts) = make_executed_block_with_txs(1, B256::ZERO, 0, 2); let tx_hashes: Vec<_> = block.recovered_block.body().transactions().map(|tx| *tx.tx_hash()).collect(); cache.insert(1, block, receipts).expect("insert"); cache.clear(); - for tx_hash in &tx_hashes { + for tx_hash in tx_hashes.iter() { assert!(cache.get_tx_info(tx_hash).is_none()); } } diff --git a/crates/flashblocks/src/cache/pending.rs b/crates/flashblocks/src/cache/pending.rs index d0655be1..65ceec61 100644 --- a/crates/flashblocks/src/cache/pending.rs +++ b/crates/flashblocks/src/cache/pending.rs @@ -65,33 +65,15 @@ impl PendingSequence { #[cfg(test)] mod tests { use super::*; - use alloy_consensus::{BlockHeader, Header, Receipt, TxEip7702}; - use alloy_primitives::{Address, Bytes, PrimitiveSignature, B256, U256}; - use op_alloy_consensus::{OpReceipt, OpTypedTransaction}; - use reth_chain_state::{ComputedTrieData, ExecutedBlock}; - use reth_execution_types::{BlockExecutionOutput, BlockExecutionResult}; - use reth_optimism_primitives::{OpBlock, OpPrimitives, OpTransactionSigned}; - use reth_primitives_traits::{RecoveredBlock, SealedBlock, SealedHeader}; + use crate::test_utils::{make_executed_block, mock_tx}; + use std::{collections::HashMap, time::Instant}; + + use alloy_consensus::Receipt; + use alloy_primitives::B256; + use op_alloy_consensus::OpReceipt; + + use reth_optimism_primitives::OpPrimitives; use reth_rpc_eth_types::PendingBlock; - use std::{collections::HashMap, sync::Arc, time::Instant}; - - fn make_executed_block(block_number: u64, parent_hash: B256) -> ExecutedBlock { - let header = Header { number: block_number, parent_hash, ..Default::default() }; - let sealed_header = SealedHeader::seal_slow(header); - let block = OpBlock::new(sealed_header.unseal(), Default::default()); - let sealed_block = SealedBlock::seal_slow(block); - let recovered_block = RecoveredBlock::new_sealed(sealed_block, vec![]); - let execution_output = Arc::new(BlockExecutionOutput { - result: BlockExecutionResult { - receipts: vec![], - requests: Default::default(), - gas_used: 0, - blob_gas_used: 0, - }, - state: Default::default(), - }); - ExecutedBlock::new(Arc::new(recovered_block), execution_output, ComputedTrieData::default()) - } fn make_pending_sequence(block_number: u64) -> PendingSequence { let executed = make_executed_block(block_number, B256::ZERO); @@ -108,29 +90,10 @@ mod tests { ) } - fn mock_tx(nonce: u64) -> OpTransactionSigned { - let tx = TxEip7702 { - chain_id: 1u64, - nonce, - max_fee_per_gas: 0x28f000fff, - max_priority_fee_per_gas: 0x28f000fff, - gas_limit: 21_000, - to: Address::default(), - value: U256::ZERO, - input: Bytes::new(), - access_list: Default::default(), - authorization_list: Default::default(), - }; - let signature = PrimitiveSignature::new(U256::default(), U256::default(), true); - OpTransactionSigned::new_unhashed(OpTypedTransaction::Eip7702(tx), signature) - } - fn make_pending_sequence_with_txs( block_number: u64, tx_count: usize, ) -> PendingSequence { - use alloy_consensus::transaction::TxHashRef; - let executed = make_executed_block(block_number, B256::ZERO); let block_hash = executed.recovered_block.hash(); let parent_hash = executed.recovered_block.parent_hash(); @@ -188,8 +151,6 @@ mod tests { #[test] fn test_pending_sequence_get_tx_info_returns_correct_info_for_known_tx() { - use alloy_consensus::transaction::TxHashRef; - let seq = make_pending_sequence_with_txs(42, 3); let (tx_hash, expected_info) = seq.tx_index.iter().next().unwrap(); let (info, bar) = seq.get_tx_info(tx_hash).expect("known tx hash should return Some"); diff --git a/crates/flashblocks/src/test_utils.rs b/crates/flashblocks/src/test_utils.rs index e1f2bfd9..372c2bf0 100644 --- a/crates/flashblocks/src/test_utils.rs +++ b/crates/flashblocks/src/test_utils.rs @@ -1,73 +1,105 @@ -//! Test utilities for flashblocks. -//! -//! Provides a factory for creating test flashblocks with automatic timestamp management. -//! -//! # Examples -//! -//! ## Simple: Create a flashblock sequence for the same block -//! -//! ```ignore -//! let factory = TestFlashBlockFactory::new(); // Default 2 second block time -//! let fb0 = factory.flashblock_at(0).build(); -//! let fb1 = factory.flashblock_after(&fb0).build(); -//! let fb2 = factory.flashblock_after(&fb1).build(); -//! ``` -//! -//! ## Create flashblocks with transactions -//! -//! ```ignore -//! let factory = TestFlashBlockFactory::new(); -//! let fb0 = factory.flashblock_at(0).build(); -//! let txs = vec![Bytes::from_static(&[1, 2, 3])]; -//! let fb1 = factory.flashblock_after(&fb0).transactions(txs).build(); -//! ``` -//! -//! ## Test across multiple blocks (timestamps auto-increment) -//! -//! ```ignore -//! let factory = TestFlashBlockFactory::new(); // Default 2 second blocks -//! -//! // Block 100 at timestamp 1000000 -//! let fb0 = factory.flashblock_at(0).build(); -//! let fb1 = factory.flashblock_after(&fb0).build(); -//! -//! // Block 101 at timestamp 1000002 (auto-incremented by block_time) -//! let fb2 = factory.flashblock_for_next_block(&fb1).build(); -//! let fb3 = factory.flashblock_after(&fb2).build(); -//! ``` -//! -//! ## Full control with builder -//! -//! ```ignore -//! let factory = TestFlashBlockFactory::new(); -//! let fb = factory.builder() -//! .block_number(100) -//! .parent_hash(specific_hash) -//! .state_root(computed_root) -//! .transactions(txs) -//! .build(); -//! ``` +use std::sync::Arc; -use alloy_primitives::{Address, Bloom, Bytes, B256, U256}; +use alloy_consensus::{Header, Receipt, TxEip7702}; +use alloy_primitives::{Address, Bloom, Bytes, Signature, B256, U256}; use alloy_rpc_types_engine::PayloadId; +use op_alloy_consensus::OpTypedTransaction; use op_alloy_rpc_types_engine::{ OpFlashblockPayload, OpFlashblockPayloadBase, OpFlashblockPayloadDelta, OpFlashblockPayloadMetadata, }; -/// Factory for creating test flashblocks with automatic timestamp management. -/// -/// Tracks `block_time` to automatically increment timestamps when creating new blocks. -/// Returns builders that can be further customized before calling `build()`. -/// -/// # Examples -/// -/// ```ignore -/// let factory = TestFlashBlockFactory::new(); // Default 2 second block time -/// let fb0 = factory.flashblock_at(0).build(); -/// let fb1 = factory.flashblock_after(&fb0).build(); -/// let fb2 = factory.flashblock_for_next_block(&fb1).build(); // timestamp auto-increments -/// ``` +use reth_chain_state::{ComputedTrieData, ExecutedBlock}; +use reth_execution_types::{BlockExecutionOutput, BlockExecutionResult}; +use reth_optimism_primitives::{ + OpBlock, OpBlockBody, OpPrimitives, OpReceipt, OpTransactionSigned, +}; +use reth_primitives_traits::{RecoveredBlock, SealedBlock, SealedHeader}; + +pub(crate) fn mock_tx(nonce: u64) -> OpTransactionSigned { + let tx = TxEip7702 { + chain_id: 1u64, + nonce, + max_fee_per_gas: 0x28f000fff, + max_priority_fee_per_gas: 0x28f000fff, + gas_limit: 10, + to: Address::default(), + value: U256::from(3_u64), + input: Bytes::from(vec![1, 2]), + access_list: Default::default(), + authorization_list: Default::default(), + }; + let signature = Signature::new(U256::default(), U256::default(), true); + OpTransactionSigned::new_unhashed(OpTypedTransaction::Eip7702(tx), signature) +} + +pub(crate) fn make_executed_block( + block_number: u64, + parent_hash: B256, +) -> ExecutedBlock { + let header = Header { number: block_number, parent_hash, ..Default::default() }; + let sealed_header = SealedHeader::seal_slow(header); + let block = OpBlock::new(sealed_header.unseal(), Default::default()); + let sealed_block = SealedBlock::seal_slow(block); + let recovered_block = RecoveredBlock::new_sealed(sealed_block, vec![]); + let execution_output = Arc::new(BlockExecutionOutput { + result: BlockExecutionResult { + receipts: vec![], + requests: Default::default(), + gas_used: 0, + blob_gas_used: 0, + }, + state: Default::default(), + }); + ExecutedBlock::new(Arc::new(recovered_block), execution_output, ComputedTrieData::default()) +} + +pub(crate) fn empty_receipts() -> Arc> { + Arc::new(vec![]) +} + +pub(crate) fn make_executed_block_with_txs( + block_number: u64, + parent_hash: B256, + nonce_start: u64, + count: usize, +) -> (ExecutedBlock, Arc>) { + let txs: Vec = + (0..count).map(|i| mock_tx(nonce_start + i as u64)).collect(); + let senders: Vec
= (0..count).map(|_| Address::default()).collect(); + let receipts: Vec = (0..count) + .map(|i| { + OpReceipt::Eip7702(Receipt { + status: true.into(), + cumulative_gas_used: 21_000 * (i as u64 + 1), + logs: vec![], + }) + }) + .collect(); + + let header = Header { number: block_number, parent_hash, ..Default::default() }; + let sealed_header = SealedHeader::seal_slow(header); + let body = OpBlockBody { transactions: txs, ..Default::default() }; + let block = OpBlock::new(sealed_header.unseal(), body); + let sealed_block = SealedBlock::seal_slow(block); + let recovered_block = RecoveredBlock::new_sealed(sealed_block, senders); + let execution_output = Arc::new(BlockExecutionOutput { + result: BlockExecutionResult { + receipts: receipts.clone(), + requests: Default::default(), + gas_used: 21_000 * count as u64, + blob_gas_used: 0, + }, + state: Default::default(), + }); + let executed = ExecutedBlock::new( + Arc::new(recovered_block), + execution_output, + ComputedTrieData::default(), + ); + (executed, Arc::new(receipts)) +} + #[derive(Debug)] pub(crate) struct TestFlashBlockFactory { /// Block time in seconds (used to auto-increment timestamps) @@ -79,8 +111,6 @@ pub(crate) struct TestFlashBlockFactory { } impl TestFlashBlockFactory { - /// Creates a new factory with a default block time of 2 seconds. - /// /// Use [`with_block_time`](Self::with_block_time) to customize the block time. pub(crate) fn new() -> Self { Self { block_time: 2, base_timestamp: 1_000_000, current_block_number: 100 } @@ -91,34 +121,10 @@ impl TestFlashBlockFactory { self } - /// Creates a builder for a flashblock at the specified index (within the current block). - /// - /// Returns a builder with index set, allowing further customization before building. - /// - /// # Examples - /// - /// ```ignore - /// let factory = TestFlashBlockFactory::new(); - /// let fb0 = factory.flashblock_at(0).build(); // Simple usage - /// let fb1 = factory.flashblock_at(1).state_root(specific_root).build(); // Customize - /// ``` pub(crate) fn flashblock_at(&self, index: u64) -> TestFlashBlockBuilder { self.builder().index(index).block_number(self.current_block_number) } - /// Creates a builder for a flashblock following the previous one in the same sequence. - /// - /// Automatically increments the index and maintains `block_number` and `payload_id`. - /// Returns a builder allowing further customization. - /// - /// # Examples - /// - /// ```ignore - /// let factory = TestFlashBlockFactory::new(); - /// let fb0 = factory.flashblock_at(0).build(); - /// let fb1 = factory.flashblock_after(&fb0).build(); // Simple - /// let fb2 = factory.flashblock_after(&fb1).transactions(txs).build(); // With txs - /// ``` pub(crate) fn flashblock_after(&self, previous: &OpFlashblockPayload) -> TestFlashBlockBuilder { let parent_hash = previous.base.as_ref().map(|b| b.parent_hash).unwrap_or(previous.diff.block_hash); @@ -131,20 +137,6 @@ impl TestFlashBlockFactory { .timestamp(previous.base.as_ref().map(|b| b.timestamp).unwrap_or(self.base_timestamp)) } - /// Creates a builder for a flashblock for the next block, starting a new sequence at index 0. - /// - /// Increments block number, uses previous `block_hash` as `parent_hash`, generates new - /// `payload_id`, and automatically increments the timestamp by `block_time`. - /// Returns a builder allowing further customization. - /// - /// # Examples - /// - /// ```ignore - /// let factory = TestFlashBlockFactory::new(); // 2 second blocks - /// let fb0 = factory.flashblock_at(0).build(); // Block 100, timestamp 1000000 - /// let fb1 = factory.flashblock_for_next_block(&fb0).build(); // Block 101, timestamp 1000002 - /// let fb2 = factory.flashblock_for_next_block(&fb1).transactions(txs).build(); // Customize - /// ``` pub(crate) fn flashblock_for_next_block( &self, previous: &OpFlashblockPayload, @@ -160,21 +152,6 @@ impl TestFlashBlockFactory { .timestamp(prev_timestamp + self.block_time) } - /// Returns a custom builder for full control over flashblock creation. - /// - /// Use this when the convenience methods don't provide enough control. - /// - /// # Examples - /// - /// ```ignore - /// let factory = TestFlashBlockFactory::new(); - /// let fb = factory.builder() - /// .index(5) - /// .block_number(200) - /// .parent_hash(specific_hash) - /// .state_root(computed_root) - /// .build(); - /// ``` pub(crate) fn builder(&self) -> TestFlashBlockBuilder { TestFlashBlockBuilder { index: 0, @@ -196,9 +173,6 @@ impl TestFlashBlockFactory { } } -/// Custom builder for creating test flashblocks with full control. -/// -/// Created via [`TestFlashBlockFactory::builder()`]. #[derive(Debug)] pub(crate) struct TestFlashBlockBuilder { index: u64, @@ -219,80 +193,66 @@ pub(crate) struct TestFlashBlockBuilder { } impl TestFlashBlockBuilder { - /// Sets the flashblock index. pub(crate) fn index(mut self, index: u64) -> Self { self.index = index; self } - /// Sets the block number. pub(crate) fn block_number(mut self, block_number: u64) -> Self { self.block_number = block_number; self } - /// Sets the payload ID. pub(crate) fn payload_id(mut self, payload_id: PayloadId) -> Self { self.payload_id = payload_id; self } - /// Sets the parent hash. pub(crate) fn parent_hash(mut self, parent_hash: B256) -> Self { self.parent_hash = parent_hash; self } - /// Sets the timestamp. pub(crate) fn timestamp(mut self, timestamp: u64) -> Self { self.timestamp = timestamp; self } - /// Sets the base payload. Automatically created for index 0 if not set. #[allow(dead_code)] pub(crate) fn base(mut self, base: OpFlashblockPayloadBase) -> Self { self.base = Some(base); self } - /// Sets the block hash in the diff. #[allow(dead_code)] pub(crate) fn block_hash(mut self, block_hash: B256) -> Self { self.block_hash = block_hash; self } - /// Sets the state root in the diff. #[allow(dead_code)] pub(crate) fn state_root(mut self, state_root: B256) -> Self { self.state_root = state_root; self } - /// Sets the receipts root in the diff. #[allow(dead_code)] pub(crate) fn receipts_root(mut self, receipts_root: B256) -> Self { self.receipts_root = receipts_root; self } - /// Sets the transactions in the diff. pub(crate) fn transactions(mut self, transactions: Vec) -> Self { self.transactions = transactions; self } - /// Sets the gas used in the diff. #[allow(dead_code)] pub(crate) fn gas_used(mut self, gas_used: u64) -> Self { self.gas_used = gas_used; self } - /// Builds the flashblock. - /// - /// If index is 0 and no base was explicitly set, creates a default base. pub(crate) fn build(mut self) -> OpFlashblockPayload { // Auto-create base for index 0 if not set if self.index == 0 && self.base.is_none() { From 53c30db2e5fc88d64e9c273c878f047fe3f35b5a Mon Sep 17 00:00:00 2001 From: Niven Date: Mon, 16 Mar 2026 14:24:03 +0800 Subject: [PATCH 28/76] feat(flashblocks-rpc): add cached receipts to pending sequence and state provider helpers MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude Opus 4.6 --- crates/flashblocks/src/cache/confirm.rs | 3 +- crates/flashblocks/src/cache/mod.rs | 145 +++++++++++++-------- crates/flashblocks/src/cache/pending.rs | 88 +++++++------ crates/flashblocks/src/subscription/rpc.rs | 19 ++- 4 files changed, 155 insertions(+), 100 deletions(-) diff --git a/crates/flashblocks/src/cache/confirm.rs b/crates/flashblocks/src/cache/confirm.rs index 3acdfefb..ddbd1883 100644 --- a/crates/flashblocks/src/cache/confirm.rs +++ b/crates/flashblocks/src/cache/confirm.rs @@ -231,7 +231,8 @@ impl ConfirmCache { #[cfg(test)] mod tests { use super::*; - use crate::test_utils::{empty_receipts, make_executed_block}; + use crate::test_utils::{empty_receipts, make_executed_block, make_executed_block_with_txs}; + use alloy_consensus::BlockHeader; use reth_optimism_primitives::OpPrimitives; #[test] diff --git a/crates/flashblocks/src/cache/mod.rs b/crates/flashblocks/src/cache/mod.rs index 7907a782..37be3ab2 100644 --- a/crates/flashblocks/src/cache/mod.rs +++ b/crates/flashblocks/src/cache/mod.rs @@ -7,7 +7,7 @@ pub use confirm::ConfirmCache; pub use pending::PendingSequence; pub use raw::RawFlashblocksCache; -use crate::PendingSequenceRx; +use crate::{execution::FlashblockCachedReceipt, PendingSequenceRx}; use parking_lot::RwLock; use std::sync::Arc; use tokio::sync::watch; @@ -53,12 +53,18 @@ pub struct CachedTxInfo { /// state, ensuring atomic operations across pending, confirmed, and height /// state (e.g. reorg detection + flush + insert in `handle_confirmed_block`). #[derive(Debug, Clone)] -pub struct FlashblockStateCache { +pub struct FlashblockStateCache +where + N::Receipt: FlashblockCachedReceipt, +{ inner: Arc>>, } // FlashblockStateCache read interfaces -impl FlashblockStateCache { +impl FlashblockStateCache +where + N::Receipt: FlashblockCachedReceipt, +{ /// Creates a new [`FlashblockStateCache`]. pub fn new() -> Self { Self { inner: Arc::new(RwLock::new(FlashblockStateCacheInner::new())) } @@ -66,7 +72,10 @@ impl FlashblockStateCache { } // FlashblockStateCache read height interfaces -impl FlashblockStateCache { +impl FlashblockStateCache +where + N::Receipt: FlashblockCachedReceipt, +{ /// Returns the current confirmed height. pub fn get_confirm_height(&self) -> u64 { self.inner.read().confirm_height @@ -117,50 +126,18 @@ impl FlashblockStateCache { } /// Creates a `StateProviderBox` that overlays the flashblock execution state on top of the - /// canonical state for the given block ID. Instantiates a `MemoryOverlayStateProvider` by - /// getting the ordered `ExecutedBlock`s from the cache, and overlaying them on top of the - /// canonical state provider. + /// canonical state for the given block ID. /// /// For a specific block number/hash, returns all confirm cache blocks up to that height. /// For `Pending`, it also includes the current pending executed block state. /// For `Latest`, resolves to the confirm height. /// Returns `None` if the target block is not in the flashblocks cache. - /// - /// # Safety of the overlay - /// The returned blocks are meant to be layered on top of a canonical `StateProviderBox` - /// via `MemoryOverlayStateProvider`. This is correct **if and only if** the overlay - /// blocks form a contiguous chain from some height down to `canonical_height + 1` - /// (or `canonical_height` itself in the redundant-but-safe race case). - /// - /// **Safe (redundant overlap)**: Due to a race between canonical commit and confirm - /// cache flush, the lowest overlay block may equal the canonical height. For example, - /// canonical is at height `x` and the overlay contains `[x+2, x+1, x]`. This is safe - /// because `MemoryOverlayStateProvider` checks overlay blocks first (newest-to-oldest) - /// — the duplicate `BundleState` at height `x` contains changes identical to what - /// canonical already applied, so the result is correct regardless of which source - /// resolves the query. - /// - /// **State inconsistency (gap in overlay)**: If an intermediate block is missing (e.g. - /// overlay has `[x+2, x]` but not `x+1`), any account modified only at height `x+1` - /// would be invisible — the query falls through to canonical, returning stale state. - /// - /// **State inconsistency (canonical too far behind)**: If the canonical height is more - /// than one block below the lowest overlay block (e.g. canonical at `x-2`, lowest overlay - /// at `x`), changes at height `x-1` are not covered by either source. - /// - /// Both failure modes reduce to: every height between `canonical_height + 1` and the - /// target must be present in the overlay. This invariant is naturally maintained by - /// `handle_confirmed_block` (rejects non-consecutive heights) and the pending block always - /// being `confirm_height + 1`. - /// - /// On validation failure (non-contiguous overlay or gap to canonical), the cache is - /// flushed and `None` is returned. pub fn get_state_provider_by_id( &self, block_id: Option, canonical_state: StateProviderBox, ) -> Option<(StateProviderBox, SealedHeaderFor)> { - let guard = self.inner.read(); + let mut guard = self.inner.write(); let block = match block_id.unwrap_or(BlockId::Number(BlockNumberOrTag::Latest)) { BlockId::Number(id) => match id { BlockNumberOrTag::Pending => guard.get_pending_block(), @@ -172,28 +149,27 @@ impl FlashblockStateCache { }? .block; let block_num = block.number(); + let in_memory = guard.get_state_provider_at_height(block_num, canonical_state)?; + Some((in_memory, block.clone_sealed_header())) + } - let in_memory = guard.get_executed_blocks_up_to_height(block_num); - drop(guard); - - let in_memory = match in_memory { - Ok(Some(blocks)) => blocks, - Ok(None) => return None, - Err(e) => { - warn!(target: "flashblocks", "Failed to get flashblocks state provider: {e}. Flushing cache"); - self.inner.write().flush(); - return None; - } - }; - Some(( - Box::new(MemoryOverlayStateProvider::new(canonical_state, in_memory)), - block.clone_sealed_header(), - )) + pub fn get_pending_state_provider( + &self, + canonical_state: StateProviderBox, + ) -> Option<(StateProviderBox, SealedHeaderFor)> { + let mut guard = self.inner.write(); + let block = guard.get_pending_block()?.block; + let block_num = block.number(); + let in_memory = guard.get_state_provider_at_height(block_num, canonical_state)?; + Some((in_memory, block.clone_sealed_header())) } } // FlashblockStateCache state mutation interfaces. -impl FlashblockStateCache { +impl FlashblockStateCache +where + N::Receipt: FlashblockCachedReceipt, +{ /// Handles updating the latest pending state by the flashblocks rpc handle. /// /// This method detects when the flashblocks sequencer has advanced to the next @@ -230,7 +206,10 @@ impl FlashblockStateCache { /// Inner state of the flashblocks state cache. #[derive(Debug)] -struct FlashblockStateCacheInner { +struct FlashblockStateCacheInner +where + N::Receipt: FlashblockCachedReceipt, +{ /// The current in-progress pending flashblock sequence, if any. pending_cache: Option>, /// Cache of confirmed flashblock sequences ahead of the canonical chain. @@ -248,7 +227,10 @@ struct FlashblockStateCacheInner { pending_sequence_tx: watch::Sender>>, } -impl FlashblockStateCacheInner { +impl FlashblockStateCacheInner +where + N::Receipt: FlashblockCachedReceipt, +{ fn new() -> Self { let (tx, rx) = watch::channel(None); @@ -415,4 +397,53 @@ impl FlashblockStateCacheInner { pub fn subscribe_pending_sequence(&self) -> PendingSequenceRx { self.pending_sequence_rx.clone() } + + /// Instantiates a `MemoryOverlayStateProvider` by getting the ordered `ExecutedBlock`s + /// from the cache, and overlaying them on top of the canonical state provider. + /// + /// # Safety of the overlay + /// The returned blocks are meant to be layered on top of a canonical `StateProviderBox` + /// via `MemoryOverlayStateProvider`. This is correct **if and only if** the overlay + /// blocks form a contiguous chain from some height down to `canonical_height + 1` + /// (or `canonical_height` itself in the redundant-but-safe race case). + /// + /// **Safe (redundant overlap)**: Due to a race between canonical commit and confirm + /// cache flush, the lowest overlay block may equal the canonical height. For example, + /// canonical is at height `x` and the overlay contains `[x+2, x+1, x]`. This is safe + /// because `MemoryOverlayStateProvider` checks overlay blocks first (newest-to-oldest) + /// — the duplicate `BundleState` at height `x` contains changes identical to what + /// canonical already applied, so the result is correct regardless of which source + /// resolves the query. + /// + /// **State inconsistency (gap in overlay)**: If an intermediate block is missing (e.g. + /// overlay has `[x+2, x]` but not `x+1`), any account modified only at height `x+1` + /// would be invisible — the query falls through to canonical, returning stale state. + /// + /// **State inconsistency (canonical too far behind)**: If the canonical height is more + /// than one block below the lowest overlay block (e.g. canonical at `x-2`, lowest overlay + /// at `x`), changes at height `x-1` are not covered by either source. + /// + /// Both failure modes reduce to: every height between `canonical_height + 1` and the + /// target must be present in the overlay. This invariant is naturally maintained by + /// `handle_confirmed_block` (rejects non-consecutive heights) and the pending block always + /// being `confirm_height + 1`. + /// + /// On validation failure (non-contiguous overlay or gap to canonical), the cache is + /// flushed and `None` is returned. + pub fn get_state_provider_at_height( + &mut self, + height: u64, + canonical_state: StateProviderBox, + ) -> Option { + let in_memory = match self.get_executed_blocks_up_to_height(height) { + Ok(Some(blocks)) => blocks, + Ok(None) => return None, + Err(e) => { + warn!(target: "flashblocks", "Failed to get flashblocks state provider: {e}. Flushing cache"); + self.flush(); + return None; + } + }; + Some(Box::new(MemoryOverlayStateProvider::new(canonical_state, in_memory))) + } } diff --git a/crates/flashblocks/src/cache/pending.rs b/crates/flashblocks/src/cache/pending.rs index 65ceec61..17e1dd58 100644 --- a/crates/flashblocks/src/cache/pending.rs +++ b/crates/flashblocks/src/cache/pending.rs @@ -1,4 +1,4 @@ -use crate::cache::CachedTxInfo; +use crate::{cache::CachedTxInfo, execution::FlashblockCachedReceipt}; use derive_more::Deref; use std::collections::HashMap; @@ -11,7 +11,10 @@ use reth_rpc_eth_types::{block::BlockAndReceipts, PendingBlock}; /// The pending flashblocks sequence built with all received OpFlashblockPayload /// alongside the metadata for the last added flashblock. #[derive(Debug, Clone, Deref)] -pub struct PendingSequence { +pub struct PendingSequence +where + N::Receipt: FlashblockCachedReceipt, +{ /// Locally built full pending block of the latest flashblocks sequence. #[deref] pub pending: PendingBlock, @@ -25,21 +28,20 @@ pub struct PendingSequence { pub parent_hash: B256, /// The last flashblock index of the latest flashblocks sequence. pub last_flashblock_index: u64, + /// Cached number of transactions covered by the pending sequence execution. + cached_tx_count: usize, + /// Cached receipts for the prefix. + pub cached_receipts: Vec, + /// Total gas used by the pending sequence. + pub cached_gas_used: u64, + /// Total blob/DA gas used by the pending sequence. + pub cached_blob_gas_used: u64, } -impl PendingSequence { - /// Create new pending flashblock. - pub const fn new( - pending: PendingBlock, - tx_index: HashMap>, - cached_reads: CachedReads, - block_hash: B256, - parent_hash: B256, - last_flashblock_index: u64, - ) -> Self { - Self { pending, tx_index, cached_reads, block_hash, parent_hash, last_flashblock_index } - } - +impl PendingSequence +where + N::Receipt: FlashblockCachedReceipt, +{ pub fn get_hash(&self) -> B256 { self.block_hash } @@ -80,14 +82,18 @@ mod tests { let block_hash = executed.recovered_block.hash(); let parent_hash = executed.recovered_block.parent_hash(); let pending_block = PendingBlock::with_executed_block(Instant::now(), executed); - PendingSequence::new( - pending_block, - HashMap::new(), - Default::default(), + PendingSequence { + pending: pending_block, + tx_index: HashMap::new(), + cached_reads: Default::default(), block_hash, parent_hash, - 0, - ) + last_flashblock_index: 0, + cached_tx_count: 0, + cached_receipts: vec![], + cached_gas_used: 0, + cached_blob_gas_used: 0, + } } fn make_pending_sequence_with_txs( @@ -101,7 +107,7 @@ mod tests { let mut tx_index = HashMap::new(); for i in 0..tx_count { let tx = mock_tx(i as u64); - let tx_hash = *tx.tx_hash(); + let tx_hash = tx.tx_hash(); let receipt = OpReceipt::Eip7702(Receipt { status: true.into(), cumulative_gas_used: 21_000 * (i as u64 + 1), @@ -114,48 +120,52 @@ mod tests { } let pending_block = PendingBlock::with_executed_block(Instant::now(), executed); - PendingSequence::new( - pending_block, + PendingSequence { + pending: pending_block, tx_index, - Default::default(), + cached_reads: Default::default(), block_hash, parent_hash, - 0, - ) + last_flashblock_index: 0, + cached_tx_count: 0, + cached_receipts: vec![], + cached_gas_used: 0, + cached_blob_gas_used: 0, + } } #[test] fn test_pending_sequence_get_hash_returns_stored_block_hash() { - let seq = make_pending_sequence(42); - assert_eq!(seq.get_hash(), seq.block_hash); + let cache = make_pending_sequence(42); + assert_eq!(cache.get_hash(), cache.block_hash); } #[test] fn test_pending_sequence_get_height_returns_block_number() { - let seq = make_pending_sequence(99); - assert_eq!(seq.get_height(), 99); + let cache = make_pending_sequence(99); + assert_eq!(cache.get_height(), 99); } #[test] fn test_pending_sequence_get_block_and_receipts_empty_receipts_on_no_tx_block() { - let seq = make_pending_sequence(3); - let bar = seq.get_block_and_receipts(); + let cache = make_pending_sequence(3); + let bar = cache.get_block_and_receipts(); assert!(bar.receipts.is_empty()); } #[test] fn test_pending_sequence_get_tx_info_returns_none_for_unknown_hash() { - let seq = make_pending_sequence_with_txs(10, 2); - assert!(seq.get_tx_info(&B256::repeat_byte(0xFF)).is_none()); + let cache = make_pending_sequence_with_txs(10, 2); + assert!(cache.get_tx_info(&B256::repeat_byte(0xFF)).is_none()); } #[test] fn test_pending_sequence_get_tx_info_returns_correct_info_for_known_tx() { - let seq = make_pending_sequence_with_txs(42, 3); - let (tx_hash, expected_info) = seq.tx_index.iter().next().unwrap(); - let (info, bar) = seq.get_tx_info(tx_hash).expect("known tx hash should return Some"); + let cache = make_pending_sequence_with_txs(42, 3); + let (tx_hash, expected_info) = cache.tx_index.iter().next().unwrap(); + let (info, bar) = cache.get_tx_info(tx_hash).expect("known tx hash should return Some"); assert_eq!(info.block_number, 42); - assert_eq!(info.block_hash, seq.block_hash); + assert_eq!(info.block_hash, cache.block_hash); assert_eq!(info.tx_index, expected_info.tx_index); assert_eq!(*info.tx.tx_hash(), *tx_hash); assert_eq!(bar.block.number(), 42); diff --git a/crates/flashblocks/src/subscription/rpc.rs b/crates/flashblocks/src/subscription/rpc.rs index 53e1955e..b37f03a5 100644 --- a/crates/flashblocks/src/subscription/rpc.rs +++ b/crates/flashblocks/src/subscription/rpc.rs @@ -1,4 +1,5 @@ use crate::{ + execution::FlashblockCachedReceipt, subscription::pubsub::{ EnrichedTransaction, FlashblockParams, FlashblockStreamEvent, FlashblockSubscriptionKind, FlashblocksFilter, @@ -74,7 +75,10 @@ pub trait FlashblocksPubSubApi { /// Optimism-specific Ethereum pubsub handler that extends standard subscriptions with flashblocks support. #[derive(Clone)] -pub struct FlashblocksPubSub { +pub struct FlashblocksPubSub +where + N::Receipt: FlashblockCachedReceipt, +{ /// Standard eth pubsub handler eth_pubsub: EthPubSub, /// All nested flashblocks fields bundled together @@ -86,6 +90,7 @@ where Eth: RpcNodeCore + 'static, Eth::Provider: BlockNumReader, Eth::RpcConvert: RpcConvert + Clone, + N::Receipt: FlashblockCachedReceipt, { /// Creates a new, shareable instance. /// @@ -162,6 +167,7 @@ where Eth: RpcNodeCore + 'static, Eth::Provider: BlockNumReader, Eth::RpcConvert: RpcConvert + Clone, + N::Receipt: FlashblockCachedReceipt, { async fn subscribe( &self, @@ -196,7 +202,10 @@ where } #[derive(Clone)] -pub struct FlashblocksPubSubInner { +pub struct FlashblocksPubSubInner +where + N::Receipt: FlashblockCachedReceipt, +{ /// Pending block receiver from flashblocks, if available pub(crate) pending_block_rx: PendingSequenceRx, /// The type that's used to spawn subscription tasks. @@ -211,6 +220,7 @@ impl FlashblocksPubSubInner where Eth: RpcNodeCore + 'static, Eth::RpcConvert: RpcConvert + Clone, + N::Receipt: FlashblockCachedReceipt, { fn new_flashblocks_stream( &self, @@ -488,7 +498,10 @@ where /// Extract `Header` from `PendingFlashBlock` fn extract_header_from_pending_block( pending_block: &PendingSequence, -) -> Result, ErrorObject<'static>> { +) -> Result, ErrorObject<'static>> +where + N::Receipt: FlashblockCachedReceipt, +{ let block = pending_block.block(); Ok(Header::from_consensus( block.clone_sealed_header().into(), From 324de323c6bb7475cfacadbcf74de41077183d2c Mon Sep 17 00:00:00 2001 From: Niven Date: Mon, 16 Mar 2026 14:27:46 +0800 Subject: [PATCH 29/76] chore(flashblocks): re-export FlashblockCachedReceipt from crate root MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude Opus 4.6 --- crates/flashblocks/src/cache/mod.rs | 2 +- crates/flashblocks/src/cache/pending.rs | 2 +- crates/flashblocks/src/lib.rs | 1 + crates/flashblocks/src/subscription/rpc.rs | 3 +-- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/crates/flashblocks/src/cache/mod.rs b/crates/flashblocks/src/cache/mod.rs index 37be3ab2..17b99eb7 100644 --- a/crates/flashblocks/src/cache/mod.rs +++ b/crates/flashblocks/src/cache/mod.rs @@ -7,7 +7,7 @@ pub use confirm::ConfirmCache; pub use pending::PendingSequence; pub use raw::RawFlashblocksCache; -use crate::{execution::FlashblockCachedReceipt, PendingSequenceRx}; +use crate::{FlashblockCachedReceipt, PendingSequenceRx}; use parking_lot::RwLock; use std::sync::Arc; use tokio::sync::watch; diff --git a/crates/flashblocks/src/cache/pending.rs b/crates/flashblocks/src/cache/pending.rs index 17e1dd58..27d49d00 100644 --- a/crates/flashblocks/src/cache/pending.rs +++ b/crates/flashblocks/src/cache/pending.rs @@ -1,4 +1,4 @@ -use crate::{cache::CachedTxInfo, execution::FlashblockCachedReceipt}; +use crate::{cache::CachedTxInfo, FlashblockCachedReceipt}; use derive_more::Deref; use std::collections::HashMap; diff --git a/crates/flashblocks/src/lib.rs b/crates/flashblocks/src/lib.rs index 20bd2ec7..e2189344 100644 --- a/crates/flashblocks/src/lib.rs +++ b/crates/flashblocks/src/lib.rs @@ -11,6 +11,7 @@ mod ws; mod test_utils; pub use cache::{CachedTxInfo, FlashblockStateCache, PendingSequence}; +pub use execution::FlashblockCachedReceipt; pub use service::FlashblocksRpcService; pub use subscription::FlashblocksPubSub; pub use ws::WsFlashBlockStream; diff --git a/crates/flashblocks/src/subscription/rpc.rs b/crates/flashblocks/src/subscription/rpc.rs index b37f03a5..362e8fbe 100644 --- a/crates/flashblocks/src/subscription/rpc.rs +++ b/crates/flashblocks/src/subscription/rpc.rs @@ -1,10 +1,9 @@ use crate::{ - execution::FlashblockCachedReceipt, subscription::pubsub::{ EnrichedTransaction, FlashblockParams, FlashblockStreamEvent, FlashblockSubscriptionKind, FlashblocksFilter, }, - PendingSequence, PendingSequenceRx, + FlashblockCachedReceipt, PendingSequence, PendingSequenceRx, }; use futures::StreamExt; From 707f5400acee128b00106a474dd77001150cf55b Mon Sep 17 00:00:00 2001 From: Niven Date: Mon, 16 Mar 2026 14:47:57 +0800 Subject: [PATCH 30/76] refactor(flashblocks): remove execution cache, inline types into execution mod MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude Opus 4.6 --- crates/flashblocks/src/cache/confirm.rs | 12 +- crates/flashblocks/src/execution/cache.rs | 675 --------------------- crates/flashblocks/src/execution/mod.rs | 32 +- crates/flashblocks/src/execution/worker.rs | 477 +++++++-------- crates/flashblocks/src/lib.rs | 2 +- 5 files changed, 252 insertions(+), 946 deletions(-) delete mode 100644 crates/flashblocks/src/execution/cache.rs diff --git a/crates/flashblocks/src/cache/confirm.rs b/crates/flashblocks/src/cache/confirm.rs index ddbd1883..512f9525 100644 --- a/crates/flashblocks/src/cache/confirm.rs +++ b/crates/flashblocks/src/cache/confirm.rs @@ -520,7 +520,7 @@ mod tests { let (block, receipts) = make_executed_block_with_txs(1, B256::ZERO, 0, 3); let block_hash = block.recovered_block.hash(); let tx_hashes: Vec<_> = - block.recovered_block.body().transactions().map(|tx| *tx.tx_hash()).collect(); + block.recovered_block.body().transactions().map(|tx| tx.tx_hash()).collect(); cache.insert(1, block, receipts).expect("insert"); for (i, tx_hash) in tx_hashes.iter().enumerate() { @@ -537,7 +537,7 @@ mod tests { let mut cache = ConfirmCache::::new(); let (block, receipts) = make_executed_block_with_txs(1, B256::ZERO, 0, 2); let tx_hashes: Vec<_> = - block.recovered_block.body().transactions().map(|tx| *tx.tx_hash()).collect(); + block.recovered_block.body().transactions().map(|tx| tx.tx_hash()).collect(); cache.insert(1, block, receipts).expect("insert"); cache.flush_up_to_height(1); @@ -551,7 +551,7 @@ mod tests { let mut cache = ConfirmCache::::new(); let (block, receipts) = make_executed_block_with_txs(5, B256::ZERO, 0, 2); let tx_hashes: Vec<_> = - block.recovered_block.body().transactions().map(|tx| *tx.tx_hash()).collect(); + block.recovered_block.body().transactions().map(|tx| tx.tx_hash()).collect(); cache.insert(5, block, receipts).expect("insert"); cache.remove_block_by_number(5); @@ -586,13 +586,13 @@ mod tests { let mut cache = ConfirmCache::::new(); let (block1, receipts1) = make_executed_block_with_txs(1, B256::ZERO, 0, 2); let tx_hashes_1: Vec<_> = - block1.recovered_block.body().transactions().map(|tx| *tx.tx_hash()).collect(); + block1.recovered_block.body().transactions().map(|tx| tx.tx_hash()).collect(); let parent = block1.recovered_block.hash(); cache.insert(1, block1, receipts1).expect("insert 1"); let (block2, receipts2) = make_executed_block_with_txs(2, parent, 100, 2); let tx_hashes_2: Vec<_> = - block2.recovered_block.body().transactions().map(|tx| *tx.tx_hash()).collect(); + block2.recovered_block.body().transactions().map(|tx| tx.tx_hash()).collect(); cache.insert(2, block2, receipts2).expect("insert 2"); cache.flush_up_to_height(1); @@ -609,7 +609,7 @@ mod tests { let mut cache = ConfirmCache::::new(); let (block, receipts) = make_executed_block_with_txs(1, B256::ZERO, 0, 2); let tx_hashes: Vec<_> = - block.recovered_block.body().transactions().map(|tx| *tx.tx_hash()).collect(); + block.recovered_block.body().transactions().map(|tx| tx.tx_hash()).collect(); cache.insert(1, block, receipts).expect("insert"); cache.clear(); diff --git a/crates/flashblocks/src/execution/cache.rs b/crates/flashblocks/src/execution/cache.rs deleted file mode 100644 index 4c2efa92..00000000 --- a/crates/flashblocks/src/execution/cache.rs +++ /dev/null @@ -1,675 +0,0 @@ -//! Execution caching for flashblock building. -//! -//! When flashblocks arrive incrementally, each new flashblock triggers a rebuild of pending -//! state from all transactions in the sequence. To ensure that the incoming flashblocks -//! are incrementally re-built, from their sequence, the execution cache stores the cumulative -//! bundle state from previous executions. This ensures that states are not re-read from disk -//! for accounts/storage that were already loaded in previous builds. -//! -//! # Approach -//! -//! This module caches the cumulative bundle state from previous executions. When the next -//! flashblock arrives, if its transaction list is a continuation of the cached list, the -//! cached bundle can be used as a **prestate** for the State builder. This avoids redundant -//! disk reads for accounts/storage that were already modified. -//! -//! The cache stores: -//! - Ordered list of executed transaction hashes (for prefix matching) -//! - Cumulative bundle state after all cached transactions (used as prestate) -//! - Cumulative receipts for all cached transactions (for future optimization) -//! - Block-level execution metadata for cached transactions (gas/requests) -//! -//! # Example -//! -//! ```text -//! Flashblock 0: txs [A, B] -//! -> Execute A, B from scratch (cold state reads) -//! -> Cache: txs=[A,B], bundle=state_after_AB -//! -//! Flashblock 1: txs [A, B, C] -//! -> Prefix [A, B] matches cache -//! -> Use cached bundle as prestate (warm state) -//! -> Execute A, B, C (A, B hit prestate cache, faster) -//! -> Cache: txs=[A,B,C], bundle=state_after_ABC -//! -//! Flashblock 2 (reorg): txs [A, D, E] -//! -> Prefix [A] matches, but tx[1]=D != B -//! -> Cached prestate may be partially useful, but diverges -//! -> Execute A, D, E -//! ``` - -use alloy_eips::eip7685::Requests; -use alloy_primitives::B256; -use reth_primitives_traits::NodePrimitives; -use reth_revm::db::BundleState; - -/// Cached block-level execution metadata for the stored transaction prefix. -#[derive(Debug, Clone, Default, PartialEq, Eq)] -pub(crate) struct CachedExecutionMeta { - /// EIP-7685 requests emitted while executing the cached prefix. - pub requests: Requests, - /// Total gas used by the cached prefix. - pub gas_used: u64, - /// Total blob/DA gas used by the cached prefix. - pub blob_gas_used: u64, -} - -/// Resumable cached state plus execution metadata for the cached prefix. -pub(crate) type ResumableState<'a, N> = - (&'a BundleState, &'a [::Receipt], &'a Requests, u64, u64, usize); - -/// Cache of transaction execution results for a single block. -/// -/// Stores cumulative execution state that can be used as a prestate to avoid -/// redundant disk reads when re-executing transactions. The cached bundle provides -/// warm state for accounts/storage already loaded, improving execution performance. -/// -/// **Note**: This cache does NOT skip transaction execution - all transactions must -/// still be executed to populate the block body. The cache only optimizes state reads. -/// -/// The cache is invalidated when: -/// - A new block starts (different block number) -/// - Parent hash changes for parent-scoped lookups -/// - A reorg is detected (transaction list diverges from cached prefix) -/// - Explicitly cleared -#[derive(Debug)] -pub struct TransactionCache { - /// Block number this cache is valid for. - block_number: u64, - /// Parent hash this cache is valid for. - cached_parent_hash: Option, - /// Ordered list of transaction hashes that have been executed. - executed_tx_hashes: Vec, - /// Cumulative bundle state after executing all cached transactions. - cumulative_bundle: BundleState, - /// Receipts for all cached transactions, in execution order. - receipts: Vec, - /// Cached block-level execution metadata. - execution_meta: CachedExecutionMeta, -} - -impl Default for TransactionCache { - fn default() -> Self { - Self::new() - } -} - -impl TransactionCache { - /// Creates a new empty transaction cache. - pub fn new() -> Self { - Self { - block_number: 0, - cached_parent_hash: None, - executed_tx_hashes: Vec::new(), - cumulative_bundle: BundleState::default(), - receipts: Vec::new(), - execution_meta: CachedExecutionMeta::default(), - } - } - - /// Creates a new cache for a specific block number. - pub fn for_block(block_number: u64) -> Self { - Self { block_number, ..Self::new() } - } - - /// Returns the block number this cache is valid for. - pub const fn block_number(&self) -> u64 { - self.block_number - } - - /// Returns the parent hash this cache is valid for, if tracked. - pub const fn parent_hash(&self) -> Option { - self.cached_parent_hash - } - - /// Checks if this cache is valid for the given block number. - pub const fn is_valid_for_block(&self, block_number: u64) -> bool { - self.block_number == block_number - } - - /// Checks if this cache is valid for the given block number and parent hash. - pub fn is_valid_for_block_parent(&self, block_number: u64, parent_hash: B256) -> bool { - self.block_number == block_number && self.cached_parent_hash == Some(parent_hash) - } - - /// Returns the number of cached transactions. - pub const fn len(&self) -> usize { - self.executed_tx_hashes.len() - } - - /// Returns true if the cache is empty. - pub const fn is_empty(&self) -> bool { - self.executed_tx_hashes.is_empty() - } - - /// Returns the cached transaction hashes. - pub fn executed_tx_hashes(&self) -> &[B256] { - &self.executed_tx_hashes - } - - /// Returns the cached receipts. - pub fn receipts(&self) -> &[N::Receipt] { - &self.receipts - } - - /// Returns the cumulative bundle state. - pub const fn bundle(&self) -> &BundleState { - &self.cumulative_bundle - } - - /// Clears the cache. - pub fn clear(&mut self) { - self.executed_tx_hashes.clear(); - self.cumulative_bundle = BundleState::default(); - self.receipts.clear(); - self.execution_meta = CachedExecutionMeta::default(); - self.block_number = 0; - self.cached_parent_hash = None; - } - - /// Updates the cache for a new block, clearing if the block number changed. - /// - /// Returns true if the cache was cleared. - pub fn update_for_block(&mut self, block_number: u64) -> bool { - if self.block_number == block_number { - false - } else { - self.clear(); - self.block_number = block_number; - true - } - } - - /// Computes the length of the matching prefix between cached transactions - /// and the provided transaction hashes. - /// - /// Returns the number of transactions that can be skipped because they - /// match the cached execution results. - pub fn matching_prefix_len(&self, tx_hashes: &[B256]) -> usize { - self.executed_tx_hashes - .iter() - .zip(tx_hashes.iter()) - .take_while(|(cached, incoming)| cached == incoming) - .count() - } - - /// Returns cached state for resuming execution if the incoming transactions have a - /// matching prefix with the cache. - /// - /// Returns `Some((bundle, receipts, requests, gas_used, blob_gas_used, skip_count))` - /// if there's a non-empty matching prefix, and the full cache matches the incoming - /// prefix, where: - /// - `bundle` is the cumulative state after the matching prefix - /// - `receipts` is the receipts for the matching prefix - /// - `skip_count` is the number of transactions to skip - /// - /// Returns `None` if: - /// - The cache is empty - /// - No prefix matches (first transaction differs) - /// - Block number doesn't match - pub(crate) fn get_resumable_state( - &self, - block_number: u64, - tx_hashes: &[B256], - ) -> Option> { - if !self.is_valid_for_block(block_number) || self.is_empty() { - return None; - } - - let prefix_len = self.matching_prefix_len(tx_hashes); - if prefix_len == 0 { - return None; - } - - // Only return state if the full cache matches (partial prefix would need - // intermediate state snapshots, which we don't currently store). - // Partial match means incoming txs diverge from cache, need to re-execute. - (prefix_len == self.executed_tx_hashes.len()).then_some(( - &self.cumulative_bundle, - self.receipts.as_slice(), - &self.execution_meta.requests, - self.execution_meta.gas_used, - self.execution_meta.blob_gas_used, - prefix_len, - )) - } - - /// Returns cached state for resuming execution if the incoming transactions have a - /// matching prefix with the cache and the parent hash matches. - /// - /// Returns `Some((bundle, receipts, requests, gas_used, blob_gas_used, skip_count))` - /// if there's a non-empty matching prefix, where the full cache matches the incoming prefix, and the - /// `(block_number, parent_hash)` tuple matches the cached scope. - pub(crate) fn get_resumable_state_for_parent( - &self, - block_number: u64, - parent_hash: B256, - tx_hashes: &[B256], - ) -> Option> { - if !self.is_valid_for_block_parent(block_number, parent_hash) || self.is_empty() { - return None; - } - - let prefix_len = self.matching_prefix_len(tx_hashes); - if prefix_len == 0 { - return None; - } - - (prefix_len == self.executed_tx_hashes.len()).then_some(( - &self.cumulative_bundle, - self.receipts.as_slice(), - &self.execution_meta.requests, - self.execution_meta.gas_used, - self.execution_meta.blob_gas_used, - prefix_len, - )) - } - - /// Updates the cache with new execution results. - /// - /// This should be called after executing a flashblock. The provided bundle - /// and receipts should represent the cumulative state after all transactions. - pub fn update( - &mut self, - block_number: u64, - tx_hashes: Vec, - bundle: BundleState, - receipts: Vec, - ) { - self.update_with_execution_meta( - block_number, - tx_hashes, - bundle, - receipts, - CachedExecutionMeta::default(), - ); - } - - /// Updates the cache with new execution results and block-level metadata. - pub(crate) fn update_with_execution_meta( - &mut self, - block_number: u64, - tx_hashes: Vec, - bundle: BundleState, - receipts: Vec, - execution_meta: CachedExecutionMeta, - ) { - self.block_number = block_number; - self.cached_parent_hash = None; - self.executed_tx_hashes = tx_hashes; - self.cumulative_bundle = bundle; - self.receipts = receipts; - self.execution_meta = execution_meta; - } - - /// Updates the cache with new execution results and block-level metadata, scoped to the - /// provided parent hash. - pub(crate) fn update_with_execution_meta_for_parent( - &mut self, - block_number: u64, - parent_hash: B256, - tx_hashes: Vec, - bundle: BundleState, - receipts: Vec, - execution_meta: CachedExecutionMeta, - ) { - self.block_number = block_number; - self.cached_parent_hash = Some(parent_hash); - self.executed_tx_hashes = tx_hashes; - self.cumulative_bundle = bundle; - self.receipts = receipts; - self.execution_meta = execution_meta; - } -} - -#[cfg(test)] -mod tests { - use super::*; - use reth_optimism_primitives::OpPrimitives; - - type TestCache = TransactionCache; - - #[test] - fn test_cache_block_validation() { - let mut cache = TestCache::for_block(100); - assert!(cache.is_valid_for_block(100)); - assert!(!cache.is_valid_for_block(101)); - assert!(!cache.is_valid_for_block_parent(100, B256::repeat_byte(0x11))); - - // Update for same block doesn't clear - assert!(!cache.update_for_block(100)); - - // Update for different block clears - assert!(cache.update_for_block(101)); - assert!(cache.is_valid_for_block(101)); - assert!(cache.parent_hash().is_none()); - } - - #[test] - fn test_cache_clear() { - let mut cache = TestCache::for_block(100); - assert_eq!(cache.block_number(), 100); - - cache.clear(); - assert_eq!(cache.block_number(), 0); - assert!(cache.is_empty()); - } - - #[test] - fn test_matching_prefix_len() { - let mut cache = TestCache::for_block(100); - - let tx_a = B256::repeat_byte(0xAA); - let tx_b = B256::repeat_byte(0xBB); - let tx_c = B256::repeat_byte(0xCC); - let tx_d = B256::repeat_byte(0xDD); - - // Update cache with [A, B] - cache.update(100, vec![tx_a, tx_b], BundleState::default(), vec![]); - - // Full match - assert_eq!(cache.matching_prefix_len(&[tx_a, tx_b]), 2); - - // Continuation - assert_eq!(cache.matching_prefix_len(&[tx_a, tx_b, tx_c]), 2); - - // Partial match (reorg at position 1) - assert_eq!(cache.matching_prefix_len(&[tx_a, tx_d, tx_c]), 1); - - // No match (reorg at position 0) - assert_eq!(cache.matching_prefix_len(&[tx_d, tx_b, tx_c]), 0); - - // Empty incoming - assert_eq!(cache.matching_prefix_len(&[]), 0); - } - - #[test] - fn test_get_resumable_state() { - let mut cache = TestCache::for_block(100); - - let tx_a = B256::repeat_byte(0xAA); - let tx_b = B256::repeat_byte(0xBB); - let tx_c = B256::repeat_byte(0xCC); - - // Empty cache returns None - assert!(cache.get_resumable_state(100, &[tx_a, tx_b]).is_none()); - - // Update cache with [A, B] - cache.update(100, vec![tx_a, tx_b], BundleState::default(), vec![]); - - // Wrong block number returns None - assert!(cache.get_resumable_state(101, &[tx_a, tx_b]).is_none()); - - // Exact match returns state - let result = cache.get_resumable_state(100, &[tx_a, tx_b]); - assert!(result.is_some()); - let (_, _, _, _, _, skip) = result.unwrap(); - assert_eq!(skip, 2); - - // Continuation returns state (can skip cached txs) - let result = cache.get_resumable_state(100, &[tx_a, tx_b, tx_c]); - assert!(result.is_some()); - let (_, _, _, _, _, skip) = result.unwrap(); - assert_eq!(skip, 2); - - // Partial match (reorg) returns None - can't use partial cache - assert!(cache.get_resumable_state(100, &[tx_a, tx_c]).is_none()); - } - - // ==================== E2E Cache Reuse Scenario Tests ==================== - - /// Tests the complete E2E cache scenario: fb0 [A,B] → fb1 [A,B,C] - /// Verifies that cached bundle can be used as prestate for the continuation. - #[test] - fn test_e2e_cache_reuse_continuation_scenario() { - let mut cache = TestCache::new(); - - let tx_a = B256::repeat_byte(0xAA); - let tx_b = B256::repeat_byte(0xBB); - let tx_c = B256::repeat_byte(0xCC); - - // Simulate fb0: execute [A, B] from scratch - let fb0_txs = vec![tx_a, tx_b]; - assert!(cache.get_resumable_state(100, &fb0_txs).is_none()); - - // After fb0 execution, update cache - cache.update(100, fb0_txs, BundleState::default(), vec![]); - assert_eq!(cache.len(), 2); - - // Simulate fb1: [A, B, C] - should resume from cached state - let fb1_txs = vec![tx_a, tx_b, tx_c]; - let result = cache.get_resumable_state(100, &fb1_txs); - assert!(result.is_some()); - let (bundle, receipts, _, _, _, skip) = result.unwrap(); - - // skip=2 indicates 2 txs are covered by cached state (for logging) - // Note: All transactions are still executed, skip is informational only - assert_eq!(skip, 2); - // Bundle is used as prestate to warm the State builder - assert!(bundle.state.is_empty()); // Default bundle is empty in test - assert!(receipts.is_empty()); // No receipts in this test - - // After fb1 execution, update cache with full list - cache.update(100, fb1_txs, BundleState::default(), vec![]); - assert_eq!(cache.len(), 3); - } - - /// Tests reorg scenario: fb0 [A, B] → fb1 [A, D, E] - /// Verifies that divergent tx list invalidates cache. - #[test] - fn test_e2e_cache_reorg_scenario() { - let mut cache = TestCache::new(); - - let tx_a = B256::repeat_byte(0xAA); - let tx_b = B256::repeat_byte(0xBB); - let tx_d = B256::repeat_byte(0xDD); - let tx_e = B256::repeat_byte(0xEE); - - // fb0: execute [A, B] - cache.update(100, vec![tx_a, tx_b], BundleState::default(), vec![]); - - // fb1 (reorg): [A, D, E] - tx[1] diverges, cannot resume - let fb1_txs = vec![tx_a, tx_d, tx_e]; - let result = cache.get_resumable_state(100, &fb1_txs); - assert!(result.is_none()); // Partial match means we can't use cache - } - - /// Tests multi-flashblock progression within same block: - /// fb0 [A] → fb1 [A,B] → fb2 [A,B,C] - /// - /// Each flashblock can use the previous bundle as prestate for warm state reads. - /// Note: All transactions are still executed; skip count is for logging only. - #[test] - fn test_e2e_multi_flashblock_progression() { - let mut cache = TestCache::new(); - - let tx_a = B256::repeat_byte(0xAA); - let tx_b = B256::repeat_byte(0xBB); - let tx_c = B256::repeat_byte(0xCC); - - // fb0: [A] - cache.update(100, vec![tx_a], BundleState::default(), vec![]); - assert_eq!(cache.len(), 1); - - // fb1: [A, B] - cached state covers [A] (skip=1 for logging) - let fb1_txs = vec![tx_a, tx_b]; - let result = cache.get_resumable_state(100, &fb1_txs); - assert!(result.is_some()); - assert_eq!(result.unwrap().4, 1); // 1 tx covered by cache - - cache.update(100, fb1_txs, BundleState::default(), vec![]); - assert_eq!(cache.len(), 2); - - // fb2: [A, B, C] - cached state covers [A, B] (skip=2 for logging) - let fb2_txs = vec![tx_a, tx_b, tx_c]; - let result = cache.get_resumable_state(100, &fb2_txs); - assert!(result.is_some()); - assert_eq!(result.unwrap().5, 2); // 2 txs covered by cache - - cache.update(100, fb2_txs, BundleState::default(), vec![]); - assert_eq!(cache.len(), 3); - } - - /// Tests that cache is invalidated on block number change. - #[test] - fn test_e2e_block_transition_clears_cache() { - let mut cache = TestCache::new(); - - let tx_a = B256::repeat_byte(0xAA); - let tx_b = B256::repeat_byte(0xBB); - - // Block 100: cache [A, B] - cache.update(100, vec![tx_a, tx_b], BundleState::default(), vec![]); - assert_eq!(cache.len(), 2); - - // Block 101: same txs shouldn't resume (different block) - let result = cache.get_resumable_state(101, &[tx_a, tx_b]); - assert!(result.is_none()); - - // Explicit block update clears cache - cache.update_for_block(101); - assert!(cache.is_empty()); - } - - /// Tests cache behavior with empty transaction list. - #[test] - fn test_cache_empty_transactions() { - let mut cache = TestCache::new(); - - // Empty flashblock (only system tx, no user txs) - cache.update(100, vec![], BundleState::default(), vec![]); - assert!(cache.is_empty()); - - // Can't resume from empty cache - let tx_a = B256::repeat_byte(0xAA); - assert!(cache.get_resumable_state(100, &[tx_a]).is_none()); - } - - /// Documents the semantics of `skip_count`. - /// - /// A resumable state is only returned when the incoming transaction list fully extends the - /// cached list. In that case, `skip_count` is the number of prefix transactions covered by - /// cached execution output. - #[test] - fn test_skip_count_matches_cached_prefix_len() { - let mut cache = TestCache::new(); - - let tx_a = B256::repeat_byte(0xAA); - let tx_b = B256::repeat_byte(0xBB); - let tx_c = B256::repeat_byte(0xCC); - - // Cache state after executing [A, B] - cache.update(100, vec![tx_a, tx_b], BundleState::default(), vec![]); - - // get_resumable_state returns skip=2 for prefix [A, B] - let result = cache.get_resumable_state(100, &[tx_a, tx_b, tx_c]); - assert!(result.is_some()); - let (bundle, _receipts, _, _, _, skip_count) = result.unwrap(); - - // skip_count indicates cached prefix length - assert_eq!(skip_count, 2); - - // The bundle is the important part - used as resumable prestate. - assert!(bundle.state.is_empty()); // Default in test, real one has state - } - - /// Tests that receipts are properly cached and returned. - #[test] - fn test_cache_preserves_receipts() { - use op_alloy_consensus::OpReceipt; - use reth_optimism_primitives::OpPrimitives; - - let mut cache: TransactionCache = TransactionCache::new(); - - let tx_a = B256::repeat_byte(0xAA); - let tx_b = B256::repeat_byte(0xBB); - - // Create mock receipts - let receipt_a = OpReceipt::Legacy(alloy_consensus::Receipt { - status: alloy_consensus::Eip658Value::Eip658(true), - cumulative_gas_used: 21000, - logs: vec![], - }); - let receipt_b = OpReceipt::Legacy(alloy_consensus::Receipt { - status: alloy_consensus::Eip658Value::Eip658(true), - cumulative_gas_used: 42000, - logs: vec![], - }); - - cache.update(100, vec![tx_a, tx_b], BundleState::default(), vec![receipt_a, receipt_b]); - - // Verify receipts are preserved - assert_eq!(cache.receipts().len(), 2); - - // On resumable state, receipts are returned - let tx_c = B256::repeat_byte(0xCC); - let result = cache.get_resumable_state(100, &[tx_a, tx_b, tx_c]); - assert!(result.is_some()); - let (_, receipts, _, _, _, _) = result.unwrap(); - assert_eq!(receipts.len(), 2); - } - - #[test] - fn test_cache_preserves_execution_meta() { - let mut cache = TestCache::new(); - - let tx_a = B256::repeat_byte(0xAA); - let tx_b = B256::repeat_byte(0xBB); - let tx_c = B256::repeat_byte(0xCC); - - let mut requests = Requests::default(); - requests.push_request_with_type(0x01, [0xAA, 0xBB]); - - cache.update_with_execution_meta( - 100, - vec![tx_a, tx_b], - BundleState::default(), - vec![], - CachedExecutionMeta { - requests: requests.clone(), - gas_used: 42_000, - blob_gas_used: 123, - }, - ); - - let resumable = cache.get_resumable_state(100, &[tx_a, tx_b, tx_c]); - assert!(resumable.is_some()); - let (_, _, cached_requests, gas_used, blob_gas_used, skip_count) = resumable.unwrap(); - assert_eq!(skip_count, 2); - assert_eq!(gas_used, 42_000); - assert_eq!(blob_gas_used, 123); - assert_eq!(cached_requests, &requests); - } - - #[test] - fn test_cache_parent_scoping() { - let mut cache = TestCache::new(); - - let tx_a = B256::repeat_byte(0xAA); - let tx_b = B256::repeat_byte(0xBB); - let tx_c = B256::repeat_byte(0xCC); - let parent_a = B256::repeat_byte(0x11); - let parent_b = B256::repeat_byte(0x22); - - cache.update_with_execution_meta_for_parent( - 100, - parent_a, - vec![tx_a, tx_b], - BundleState::default(), - vec![], - CachedExecutionMeta { - requests: Requests::default(), - gas_used: 42_000, - blob_gas_used: 0, - }, - ); - - // Matching block + parent should hit. - let hit = cache.get_resumable_state_for_parent(100, parent_a, &[tx_a, tx_b, tx_c]); - assert!(hit.is_some()); - - // Same block but different parent should miss. - let miss = cache.get_resumable_state_for_parent(100, parent_b, &[tx_a, tx_b, tx_c]); - assert!(miss.is_none()); - } -} diff --git a/crates/flashblocks/src/execution/mod.rs b/crates/flashblocks/src/execution/mod.rs index 751b3fd3..70904c40 100644 --- a/crates/flashblocks/src/execution/mod.rs +++ b/crates/flashblocks/src/execution/mod.rs @@ -1,5 +1,29 @@ -mod cache; -use cache::{CachedExecutionMeta, TransactionCache}; - pub(crate) mod worker; -pub use worker::{BuildArgs, BuildResult, FlashblockCachedReceipt}; + +use op_alloy_rpc_types_engine::OpFlashblockPayloadBase; +use reth_optimism_primitives::OpReceipt; + +pub(crate) struct BuildArgs { + pub(crate) base: OpFlashblockPayloadBase, + pub(crate) transactions: I, + pub(crate) last_flashblock_index: u64, +} + +/// Receipt requirements for cache-resume flow. +pub(crate) trait FlashblockCachedReceipt: Clone { + /// Adds `gas_offset` to each receipt's `cumulative_gas_used`. + fn add_cumulative_gas_offset(receipts: &mut [Self], gas_offset: u64); +} + +impl FlashblockCachedReceipt for OpReceipt { + fn add_cumulative_gas_offset(receipts: &mut [Self], gas_offset: u64) { + if gas_offset == 0 { + return; + } + + for receipt in receipts { + let inner = receipt.as_receipt_mut(); + inner.cumulative_gas_used = inner.cumulative_gas_used.saturating_add(gas_offset); + } + } +} diff --git a/crates/flashblocks/src/execution/worker.rs b/crates/flashblocks/src/execution/worker.rs index 4d0f9154..973bc419 100644 --- a/crates/flashblocks/src/execution/worker.rs +++ b/crates/flashblocks/src/execution/worker.rs @@ -1,12 +1,11 @@ use crate::{ cache::{FlashblockStateCache, PendingSequence}, - execution::{CachedExecutionMeta, TransactionCache}, + BuildArgs, FlashblockCachedReceipt, }; use std::{ sync::Arc, time::{Duration, Instant}, }; -use tokio_util::sync::CancellationToken; use tracing::trace; use alloy_eips::{eip2718::WithEncoded, BlockNumberOrTag}; @@ -22,13 +21,11 @@ use reth_evm::{ ConfigureEvm, Evm, }; use reth_execution_types::{BlockExecutionOutput, BlockExecutionResult}; -use reth_optimism_primitives::OpReceipt; use reth_primitives_traits::{ transaction::TxHashRef, AlloyBlockHeader, BlockTy, HeaderTy, NodePrimitives, ReceiptTy, Recovered, RecoveredBlock, SealedHeader, }; use reth_revm::{ - cached::CachedReads, database::StateProviderDatabase, db::{states::bundle_state::BundleRetention, BundleState, State}, }; @@ -38,81 +35,41 @@ use reth_storage_api::{ StateRootProvider, }; -pub(crate) struct BuildArgs { - pub(crate) base: OpFlashblockPayloadBase, - pub(crate) transactions: I, - pub(crate) cached_state: Option<(B256, CachedReads)>, - pub(crate) last_flashblock_index: u64, - pub(crate) cancel: CancellationToken, -} - /// The `FlashblocksValidator` builds [`PendingBlock`] out of a sequence of transactions. /// /// Owns a [`TransactionCache`] for incremental prefix caching between flashblock builds. #[derive(Debug)] -pub(crate) struct FlashblocksValidator { +pub(crate) struct FlashblockSequenceValidator +where + N::Receipt: FlashblockCachedReceipt, +{ /// The EVM configuration used to build the flashblocks. evm_config: EvmConfig, - /// The transaction execution cache for incremental executions. - tx_cache: TransactionCache, - /// The state cache containing the canonical chainstate provider and the flashblocks + /// The canonical chainstate provider. + provider: Provider, + /// The flashblocks state cache containing the flashblocks state cache layer. /// state cache layer. - state_cache: FlashblockStateCache, + flashblocks_state: FlashblockStateCache, } -impl FlashblocksValidator { +impl FlashblockSequenceValidator +where + N::Receipt: FlashblockCachedReceipt, +{ pub(crate) fn new( evm_config: EvmConfig, - state_cache: FlashblockStateCache, + provider: Provider, + flashblocks_state: FlashblockStateCache, ) -> Self { - Self { evm_config, state_cache, tx_cache: TransactionCache::new() } + Self { evm_config, provider, flashblocks_state } } pub(crate) const fn provider(&self) -> &Provider { &self.provider } - - /// Clears the transaction cache (used on reorg/catch-up). - pub(crate) fn clear_cache(&mut self) { - self.tx_cache.clear(); - } } -/// Cached prefix execution data used to resume canonical builds. -#[derive(Debug, Clone)] -struct CachedPrefixExecutionResult { - /// Number of leading transactions covered by cached execution. - cached_tx_count: usize, - /// Cumulative bundle state after executing the cached prefix. - bundle: BundleState, - /// Cached receipts for the prefix. - receipts: Vec, - /// Total gas used by the cached prefix. - gas_used: u64, - /// Total blob/DA gas used by the cached prefix. - blob_gas_used: u64, -} - -/// Receipt requirements for cache-resume flow. -pub trait FlashblockCachedReceipt: Clone { - /// Adds `gas_offset` to each receipt's `cumulative_gas_used`. - fn add_cumulative_gas_offset(receipts: &mut [Self], gas_offset: u64); -} - -impl FlashblockCachedReceipt for OpReceipt { - fn add_cumulative_gas_offset(receipts: &mut [Self], gas_offset: u64) { - if gas_offset == 0 { - return; - } - - for receipt in receipts { - let inner = receipt.as_receipt_mut(); - inner.cumulative_gas_used = inner.cumulative_gas_used.saturating_add(gas_offset); - } - } -} - -impl FlashBlockBuilder +impl FlashblockSequenceValidator where N: NodePrimitives, N::Receipt: FlashblockCachedReceipt, @@ -140,7 +97,7 @@ where pub(crate) fn execute>>>( &mut self, mut args: BuildArgs, - ) -> eyre::Result> { + ) -> eyre::Result<()> { trace!(target: "flashblocks", "Attempting new pending block from flashblocks"); let parent_hash = args.base.parent_hash; @@ -477,202 +434,202 @@ fn is_consistent_speculative_parent_hashes( incoming_parent_hash == pending_block_hash && pending_block_hash == pending_sealed_hash } -#[cfg(test)] -mod tests { - use super::{is_consistent_speculative_parent_hashes, BuildArgs, FlashBlockBuilder}; - use crate::execution::cache::CachedExecutionMeta; - use alloy_consensus::{SignableTransaction, TxEip1559}; - use alloy_eips::eip2718::Encodable2718; - use alloy_network::TxSignerSync; - use alloy_primitives::{Address, StorageKey, StorageValue, TxKind, B256, U256}; - use alloy_signer_local::PrivateKeySigner; - use op_alloy_rpc_types_engine::OpFlashblockPayloadBase; - use op_revm::constants::L1_BLOCK_CONTRACT; - use reth_optimism_chainspec::OP_MAINNET; - use reth_optimism_evm::OpEvmConfig; - use reth_optimism_primitives::{OpBlock, OpPrimitives, OpTransactionSigned}; - use reth_primitives_traits::{AlloyBlockHeader, Recovered, SignerRecoverable}; - use reth_provider::test_utils::{ExtendedAccount, MockEthProvider}; - use reth_provider::ChainSpecProvider; - use reth_storage_api::BlockReaderIdExt; - use std::str::FromStr; - - fn signed_transfer_tx( - signer: &PrivateKeySigner, - nonce: u64, - recipient: Address, - ) -> OpTransactionSigned { - let mut tx = TxEip1559 { - chain_id: 10, // OP Mainnet chain id - nonce, - gas_limit: 100_000, - max_priority_fee_per_gas: 1_000_000_000, - max_fee_per_gas: 2_000_000_000, - to: TxKind::Call(recipient), - value: U256::from(1), - ..Default::default() - }; - let signature = signer.sign_transaction_sync(&mut tx).expect("signing tx succeeds"); - tx.into_signed(signature).into() - } - - fn into_encoded_recovered( - tx: OpTransactionSigned, - signer: Address, - ) -> alloy_eips::eip2718::WithEncoded> { - let encoded = tx.encoded_2718(); - Recovered::new_unchecked(tx, signer).into_encoded_with(encoded) - } - - #[test] - fn speculative_parent_hashes_must_all_match() { - let h = B256::repeat_byte(0x11); - assert!(is_consistent_speculative_parent_hashes(h, h, h)); - } - - #[test] - fn speculative_parent_hashes_reject_any_mismatch() { - let incoming = B256::repeat_byte(0x11); - let pending = B256::repeat_byte(0x22); - let sealed = B256::repeat_byte(0x33); - - assert!(!is_consistent_speculative_parent_hashes(incoming, pending, sealed)); - assert!(!is_consistent_speculative_parent_hashes(incoming, incoming, sealed)); - assert!(!is_consistent_speculative_parent_hashes(incoming, pending, pending)); - } - - #[test] - fn canonical_build_reuses_cached_prefix_execution() { - let provider = MockEthProvider::::new().with_chain_spec(OP_MAINNET.clone()); - let genesis_hash = provider.chain_spec().genesis_hash(); - let genesis_block = - OpBlock::new(provider.chain_spec().genesis_header().clone(), Default::default()); - provider.add_block(genesis_hash, genesis_block); - - let recipient = Address::repeat_byte(0x22); - let signer = PrivateKeySigner::random(); - let tx_a = signed_transfer_tx(&signer, 0, recipient); - let tx_b = signed_transfer_tx(&signer, 1, recipient); - let tx_c = signed_transfer_tx(&signer, 2, recipient); - let signer = tx_a.recover_signer().expect("tx signer recovery succeeds"); - - provider.add_account(signer, ExtendedAccount::new(0, U256::from(1_000_000_000_000_000u64))); - provider.add_account(recipient, ExtendedAccount::new(0, U256::ZERO)); - provider.add_account( - L1_BLOCK_CONTRACT, - ExtendedAccount::new(1, U256::ZERO).extend_storage([ - (StorageKey::with_last_byte(1), StorageValue::from(1_000_000_000u64)), - (StorageKey::with_last_byte(5), StorageValue::from(188u64)), - (StorageKey::with_last_byte(6), StorageValue::from(684_000u64)), - ( - StorageKey::with_last_byte(3), - StorageValue::from_str( - "0x0000000000000000000000000000000000001db0000d27300000000000000005", - ) - .expect("valid L1 fee scalar storage value"), - ), - ]), - ); - - let latest = provider - .latest_header() - .expect("provider latest header query succeeds") - .expect("genesis header exists"); - - let base = OpFlashblockPayloadBase { - parent_hash: latest.hash(), - parent_beacon_block_root: B256::ZERO, - fee_recipient: Address::ZERO, - prev_randao: B256::repeat_byte(0x55), - block_number: latest.number() + 1, - gas_limit: 30_000_000, - timestamp: latest.timestamp() + 2, - extra_data: Default::default(), - base_fee_per_gas: U256::from(1_000_000_000u64), - }; - let base_parent_hash = base.parent_hash; - - let tx_a_hash = B256::from(*tx_a.tx_hash()); - let tx_b_hash = B256::from(*tx_b.tx_hash()); - let tx_c_hash = B256::from(*tx_c.tx_hash()); - - let tx_a = into_encoded_recovered(tx_a, signer); - let tx_b = into_encoded_recovered(tx_b, signer); - let tx_c = into_encoded_recovered(tx_c, signer); - - let evm_config = OpEvmConfig::optimism(OP_MAINNET.clone()); - let mut builder = FlashBlockBuilder::::new(evm_config, provider); - - let first = builder - .execute(BuildArgs { - base: base.clone(), - transactions: vec![tx_a.clone(), tx_b.clone()], - cached_state: None, - last_flashblock_index: 0, - last_flashblock_hash: B256::repeat_byte(0xA0), - compute_state_root: false, - pending_parent: None, - }) - .expect("first build succeeds") - .expect("first build is canonical"); - - assert_eq!(first.pending_state.execution_outcome.result.receipts.len(), 2); - - let cached_hashes = vec![tx_a_hash, tx_b_hash]; - let (bundle, receipts, requests, gas_used, blob_gas_used, skip) = builder - .tx_cache - .get_resumable_state_with_execution_meta_for_parent( - base.block_number, - base_parent_hash, - &cached_hashes, - ) - .expect("cache should contain first build execution state"); - assert_eq!(skip, 2); - - let mut tampered_receipts = receipts.to_vec(); - tampered_receipts[0].as_receipt_mut().cumulative_gas_used = - tampered_receipts[0].as_receipt().cumulative_gas_used.saturating_add(17); - let expected_tampered_gas = tampered_receipts[0].as_receipt().cumulative_gas_used; - - builder.tx_cache.update_with_execution_meta_for_parent( - base.block_number, - base_parent_hash, - cached_hashes, - bundle.clone(), - tampered_receipts, - CachedExecutionMeta { requests: requests.clone(), gas_used, blob_gas_used }, - ); - - let second_hashes = vec![tx_a_hash, tx_b_hash, tx_c_hash]; - let (_, _, _, _, _, skip) = builder - .tx_cache - .get_resumable_state_with_execution_meta_for_parent( - base.block_number, - base_parent_hash, - &second_hashes, - ) - .expect("second tx list should extend cached prefix"); - assert_eq!(skip, 2); - - let second = builder - .execute(BuildArgs { - base, - transactions: vec![tx_a, tx_b, tx_c], - cached_state: None, - last_flashblock_index: 1, - last_flashblock_hash: B256::repeat_byte(0xA1), - compute_state_root: false, - pending_parent: None, - }) - .expect("second build succeeds") - .expect("second build is canonical"); - - let receipts = &second.pending_state.execution_outcome.result.receipts; - assert_eq!(receipts.len(), 3); - assert_eq!(receipts[0].as_receipt().cumulative_gas_used, expected_tampered_gas); - assert!( - receipts[2].as_receipt().cumulative_gas_used - > receipts[1].as_receipt().cumulative_gas_used - ); - } -} +// #[cfg(test)] +// mod tests { +// use super::{is_consistent_speculative_parent_hashes, BuildArgs, FlashBlockBuilder}; +// use crate::execution::cache::CachedExecutionMeta; +// use alloy_consensus::{SignableTransaction, TxEip1559}; +// use alloy_eips::eip2718::Encodable2718; +// use alloy_network::TxSignerSync; +// use alloy_primitives::{Address, StorageKey, StorageValue, TxKind, B256, U256}; +// use alloy_signer_local::PrivateKeySigner; +// use op_alloy_rpc_types_engine::OpFlashblockPayloadBase; +// use op_revm::constants::L1_BLOCK_CONTRACT; +// use reth_optimism_chainspec::OP_MAINNET; +// use reth_optimism_evm::OpEvmConfig; +// use reth_optimism_primitives::{OpBlock, OpPrimitives, OpTransactionSigned}; +// use reth_primitives_traits::{AlloyBlockHeader, Recovered, SignerRecoverable}; +// use reth_provider::test_utils::{ExtendedAccount, MockEthProvider}; +// use reth_provider::ChainSpecProvider; +// use reth_storage_api::BlockReaderIdExt; +// use std::str::FromStr; + +// fn signed_transfer_tx( +// signer: &PrivateKeySigner, +// nonce: u64, +// recipient: Address, +// ) -> OpTransactionSigned { +// let mut tx = TxEip1559 { +// chain_id: 10, // OP Mainnet chain id +// nonce, +// gas_limit: 100_000, +// max_priority_fee_per_gas: 1_000_000_000, +// max_fee_per_gas: 2_000_000_000, +// to: TxKind::Call(recipient), +// value: U256::from(1), +// ..Default::default() +// }; +// let signature = signer.sign_transaction_sync(&mut tx).expect("signing tx succeeds"); +// tx.into_signed(signature).into() +// } + +// fn into_encoded_recovered( +// tx: OpTransactionSigned, +// signer: Address, +// ) -> alloy_eips::eip2718::WithEncoded> { +// let encoded = tx.encoded_2718(); +// Recovered::new_unchecked(tx, signer).into_encoded_with(encoded) +// } + +// #[test] +// fn speculative_parent_hashes_must_all_match() { +// let h = B256::repeat_byte(0x11); +// assert!(is_consistent_speculative_parent_hashes(h, h, h)); +// } + +// #[test] +// fn speculative_parent_hashes_reject_any_mismatch() { +// let incoming = B256::repeat_byte(0x11); +// let pending = B256::repeat_byte(0x22); +// let sealed = B256::repeat_byte(0x33); + +// assert!(!is_consistent_speculative_parent_hashes(incoming, pending, sealed)); +// assert!(!is_consistent_speculative_parent_hashes(incoming, incoming, sealed)); +// assert!(!is_consistent_speculative_parent_hashes(incoming, pending, pending)); +// } + +// #[test] +// fn canonical_build_reuses_cached_prefix_execution() { +// let provider = MockEthProvider::::new().with_chain_spec(OP_MAINNET.clone()); +// let genesis_hash = provider.chain_spec().genesis_hash(); +// let genesis_block = +// OpBlock::new(provider.chain_spec().genesis_header().clone(), Default::default()); +// provider.add_block(genesis_hash, genesis_block); + +// let recipient = Address::repeat_byte(0x22); +// let signer = PrivateKeySigner::random(); +// let tx_a = signed_transfer_tx(&signer, 0, recipient); +// let tx_b = signed_transfer_tx(&signer, 1, recipient); +// let tx_c = signed_transfer_tx(&signer, 2, recipient); +// let signer = tx_a.recover_signer().expect("tx signer recovery succeeds"); + +// provider.add_account(signer, ExtendedAccount::new(0, U256::from(1_000_000_000_000_000u64))); +// provider.add_account(recipient, ExtendedAccount::new(0, U256::ZERO)); +// provider.add_account( +// L1_BLOCK_CONTRACT, +// ExtendedAccount::new(1, U256::ZERO).extend_storage([ +// (StorageKey::with_last_byte(1), StorageValue::from(1_000_000_000u64)), +// (StorageKey::with_last_byte(5), StorageValue::from(188u64)), +// (StorageKey::with_last_byte(6), StorageValue::from(684_000u64)), +// ( +// StorageKey::with_last_byte(3), +// StorageValue::from_str( +// "0x0000000000000000000000000000000000001db0000d27300000000000000005", +// ) +// .expect("valid L1 fee scalar storage value"), +// ), +// ]), +// ); + +// let latest = provider +// .latest_header() +// .expect("provider latest header query succeeds") +// .expect("genesis header exists"); + +// let base = OpFlashblockPayloadBase { +// parent_hash: latest.hash(), +// parent_beacon_block_root: B256::ZERO, +// fee_recipient: Address::ZERO, +// prev_randao: B256::repeat_byte(0x55), +// block_number: latest.number() + 1, +// gas_limit: 30_000_000, +// timestamp: latest.timestamp() + 2, +// extra_data: Default::default(), +// base_fee_per_gas: U256::from(1_000_000_000u64), +// }; +// let base_parent_hash = base.parent_hash; + +// let tx_a_hash = B256::from(*tx_a.tx_hash()); +// let tx_b_hash = B256::from(*tx_b.tx_hash()); +// let tx_c_hash = B256::from(*tx_c.tx_hash()); + +// let tx_a = into_encoded_recovered(tx_a, signer); +// let tx_b = into_encoded_recovered(tx_b, signer); +// let tx_c = into_encoded_recovered(tx_c, signer); + +// let evm_config = OpEvmConfig::optimism(OP_MAINNET.clone()); +// let mut builder = FlashBlockBuilder::::new(evm_config, provider); + +// let first = builder +// .execute(BuildArgs { +// base: base.clone(), +// transactions: vec![tx_a.clone(), tx_b.clone()], +// cached_state: None, +// last_flashblock_index: 0, +// last_flashblock_hash: B256::repeat_byte(0xA0), +// compute_state_root: false, +// pending_parent: None, +// }) +// .expect("first build succeeds") +// .expect("first build is canonical"); + +// assert_eq!(first.pending_state.execution_outcome.result.receipts.len(), 2); + +// let cached_hashes = vec![tx_a_hash, tx_b_hash]; +// let (bundle, receipts, requests, gas_used, blob_gas_used, skip) = builder +// .tx_cache +// .get_resumable_state_with_execution_meta_for_parent( +// base.block_number, +// base_parent_hash, +// &cached_hashes, +// ) +// .expect("cache should contain first build execution state"); +// assert_eq!(skip, 2); + +// let mut tampered_receipts = receipts.to_vec(); +// tampered_receipts[0].as_receipt_mut().cumulative_gas_used = +// tampered_receipts[0].as_receipt().cumulative_gas_used.saturating_add(17); +// let expected_tampered_gas = tampered_receipts[0].as_receipt().cumulative_gas_used; + +// builder.tx_cache.update_with_execution_meta_for_parent( +// base.block_number, +// base_parent_hash, +// cached_hashes, +// bundle.clone(), +// tampered_receipts, +// CachedExecutionMeta { requests: requests.clone(), gas_used, blob_gas_used }, +// ); + +// let second_hashes = vec![tx_a_hash, tx_b_hash, tx_c_hash]; +// let (_, _, _, _, _, skip) = builder +// .tx_cache +// .get_resumable_state_with_execution_meta_for_parent( +// base.block_number, +// base_parent_hash, +// &second_hashes, +// ) +// .expect("second tx list should extend cached prefix"); +// assert_eq!(skip, 2); + +// let second = builder +// .execute(BuildArgs { +// base, +// transactions: vec![tx_a, tx_b, tx_c], +// cached_state: None, +// last_flashblock_index: 1, +// last_flashblock_hash: B256::repeat_byte(0xA1), +// compute_state_root: false, +// pending_parent: None, +// }) +// .expect("second build succeeds") +// .expect("second build is canonical"); + +// let receipts = &second.pending_state.execution_outcome.result.receipts; +// assert_eq!(receipts.len(), 3); +// assert_eq!(receipts[0].as_receipt().cumulative_gas_used, expected_tampered_gas); +// assert!( +// receipts[2].as_receipt().cumulative_gas_used +// > receipts[1].as_receipt().cumulative_gas_used +// ); +// } +// } diff --git a/crates/flashblocks/src/lib.rs b/crates/flashblocks/src/lib.rs index e2189344..034656f7 100644 --- a/crates/flashblocks/src/lib.rs +++ b/crates/flashblocks/src/lib.rs @@ -11,7 +11,7 @@ mod ws; mod test_utils; pub use cache::{CachedTxInfo, FlashblockStateCache, PendingSequence}; -pub use execution::FlashblockCachedReceipt; +pub(crate) use execution::{BuildArgs, FlashblockCachedReceipt}; pub use service::FlashblocksRpcService; pub use subscription::FlashblocksPubSub; pub use ws::WsFlashBlockStream; From 079ecb7279281461973f4615357067a3213c4157 Mon Sep 17 00:00:00 2001 From: Niven Date: Mon, 16 Mar 2026 17:16:34 +0800 Subject: [PATCH 31/76] feat: revamp flashblocks execution logic, use sync SR calc first MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude Opus 4.6 --- Cargo.lock | 13 +- Cargo.toml | 2 +- crates/flashblocks/Cargo.toml | 1 + crates/flashblocks/src/cache/confirm.rs | 44 +- crates/flashblocks/src/cache/mod.rs | 22 +- crates/flashblocks/src/cache/pending.rs | 16 - crates/flashblocks/src/execution/mod.rs | 3 +- crates/flashblocks/src/execution/worker.rs | 833 ++++++++------------- crates/flashblocks/src/lib.rs | 2 +- 9 files changed, 350 insertions(+), 586 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 75657ccf..9e418d04 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8073,7 +8073,7 @@ dependencies = [ "reth-node-api", "reth-primitives-traits", "reth-tracing", - "ringbuffer 0.16.0", + "ringbuffer", "serde", "serde_json", "tokio", @@ -9554,7 +9554,7 @@ dependencies = [ "reth-rpc-eth-types", "reth-storage-api", "reth-tasks", - "ringbuffer 0.16.0", + "ringbuffer", "serde_json", "tokio", "tokio-tungstenite 0.28.0", @@ -11028,12 +11028,6 @@ dependencies = [ "windows-sys 0.52.0", ] -[[package]] -name = "ringbuffer" -version = "0.15.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3df6368f71f205ff9c33c076d170dd56ebf68e8161c733c0caa07a7a5509ed53" - [[package]] name = "ringbuffer" version = "0.16.0" @@ -14221,7 +14215,8 @@ dependencies = [ "reth-storage-api", "reth-tasks", "reth-tracing", - "ringbuffer 0.15.0", + "reth-trie-common", + "ringbuffer", "serde", "serde_json", "test-case", diff --git a/Cargo.toml b/Cargo.toml index ea693877..f3e473b5 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -241,7 +241,7 @@ tracing = { version = "0.1.41" } shellexpand = "3.1" url = "2.5" brotli = "8.0" -ringbuffer = "0.15" +ringbuffer = "=0.16.0" # p2p libp2p = { version = "0.56", features = ["identify", "ping", "noise", "tcp", "autonat", "mdns", "tokio", "cbor", "macros", "yamux", "dns"] } diff --git a/crates/flashblocks/Cargo.toml b/crates/flashblocks/Cargo.toml index 3c47a756..7eb7569b 100644 --- a/crates/flashblocks/Cargo.toml +++ b/crates/flashblocks/Cargo.toml @@ -33,6 +33,7 @@ reth-rpc-eth-types.workspace = true reth-rpc-server-types.workspace = true reth-optimism-flashblocks.workspace = true reth-storage-api.workspace = true +reth-trie-common.workspace = true reth-tasks = { workspace = true, features = ["rayon"] } reth-tracing.workspace = true diff --git a/crates/flashblocks/src/cache/confirm.rs b/crates/flashblocks/src/cache/confirm.rs index 512f9525..f99a1f83 100644 --- a/crates/flashblocks/src/cache/confirm.rs +++ b/crates/flashblocks/src/cache/confirm.rs @@ -16,17 +16,17 @@ const DEFAULT_CONFIRM_BLOCK_CACHE_SIZE: usize = 1_000; const DEFAULT_TX_CACHE_SIZE: usize = DEFAULT_CONFIRM_BLOCK_CACHE_SIZE * 10_000; #[derive(Debug)] -pub struct ConfirmedBlock { +pub(crate) struct ConfirmedBlock { /// The locally built pending block with execution output. - pub executed_block: ExecutedBlock, + pub(crate) executed_block: ExecutedBlock, /// The receipts for the pending block - pub receipts: Arc>>, + pub(crate) receipts: Arc>>, } impl ConfirmedBlock { /// Returns a pair of [`RecoveredBlock`] and a vector of [`NodePrimitives::Receipt`]s by /// cloning from borrowed self. - pub fn to_block_and_receipts(&self) -> BlockAndReceipts { + pub(crate) fn to_block_and_receipts(&self) -> BlockAndReceipts { BlockAndReceipts { block: self.executed_block.recovered_block.clone(), receipts: self.receipts.clone(), @@ -45,7 +45,7 @@ impl ConfirmedBlock { /// Transaction data is stored in a `HashMap` which indexes transaction hashes to /// [`CachedTxInfo`] for O(1) tx/receipt lookups. #[derive(Debug)] -pub struct ConfirmCache { +pub(crate) struct ConfirmCache { /// Primary storage: block number → (block hash, block + receipts). /// `BTreeMap` ordering enables efficient range-based flush via `split_off`. blocks: BTreeMap)>, @@ -63,7 +63,7 @@ impl Default for ConfirmCache { impl ConfirmCache { /// Creates a new [`ConfirmCache`]. - pub fn new() -> Self { + pub(crate) fn new() -> Self { Self { blocks: BTreeMap::new(), hash_to_number: HashMap::with_capacity(DEFAULT_CONFIRM_BLOCK_CACHE_SIZE), @@ -72,17 +72,17 @@ impl ConfirmCache { } /// Returns the number of cached entries. - pub fn len(&self) -> usize { + pub(crate) fn len(&self) -> usize { self.blocks.len() } /// Returns `true` if the cache is empty. - pub fn is_empty(&self) -> bool { + pub(crate) fn is_empty(&self) -> bool { self.blocks.is_empty() } /// Inserts a confirmed block into the cache, indexed by block number and block hash. - pub fn insert( + pub(crate) fn insert( &mut self, height: u64, executed_block: ExecutedBlock, @@ -118,34 +118,37 @@ impl ConfirmCache { } /// Clears all entries. - pub fn clear(&mut self) { + pub(crate) fn clear(&mut self) { self.tx_index.clear(); self.blocks.clear(); self.hash_to_number.clear(); } /// Returns the block number for the given block hash, if cached. - pub fn number_for_hash(&self, block_hash: &B256) -> Option { + pub(crate) fn number_for_hash(&self, block_hash: &B256) -> Option { self.hash_to_number.get(block_hash).copied() } /// Returns the block hash for the given block number, if cached. - pub fn hash_for_number(&self, block_number: u64) -> Option { + pub(crate) fn hash_for_number(&self, block_number: u64) -> Option { self.blocks.get(&block_number).map(|(hash, _)| *hash) } /// Returns the confirmed block for the given block hash, if present. - pub fn get_block_by_hash(&self, block_hash: &B256) -> Option> { + pub(crate) fn get_block_by_hash(&self, block_hash: &B256) -> Option> { self.get_block_by_number(self.number_for_hash(block_hash)?) } /// Returns the confirmed block for the given block number, if present. - pub fn get_block_by_number(&self, block_number: u64) -> Option> { + pub(crate) fn get_block_by_number(&self, block_number: u64) -> Option> { self.blocks.get(&block_number).map(|(_, entry)| entry.to_block_and_receipts()) } /// Returns the cached transaction info for the given tx hash, if present. - pub fn get_tx_info(&self, tx_hash: &TxHash) -> Option<(CachedTxInfo, BlockAndReceipts)> { + pub(crate) fn get_tx_info( + &self, + tx_hash: &TxHash, + ) -> Option<(CachedTxInfo, BlockAndReceipts)> { let tx_info = self.tx_index.get(tx_hash).cloned()?; let block = self.get_block_by_number(tx_info.block_number)?; Some((tx_info, block)) @@ -155,7 +158,7 @@ impl ConfirmCache { /// ordered newest to oldest (for use with `MemoryOverlayStateProvider`). /// /// Returns an error if state cache pollution detected (non-contiguous blocks). - pub fn get_executed_blocks_up_to_height( + pub(crate) fn get_executed_blocks_up_to_height( &self, target_height: u64, canon_height: u64, @@ -190,7 +193,10 @@ impl ConfirmCache { } /// Removes and returns the confirmed block for the given block number. - pub fn remove_block_by_number(&mut self, block_number: u64) -> Option> { + pub(crate) fn remove_block_by_number( + &mut self, + block_number: u64, + ) -> Option> { let (hash, block) = self.blocks.remove(&block_number)?; self.hash_to_number.remove(&hash); self.remove_tx_index_for_block(&block); @@ -198,7 +204,7 @@ impl ConfirmCache { } /// Removes and returns the confirmed block for the given block hash. - pub fn remove_block_by_hash(&mut self, block_hash: &B256) -> Option> { + pub(crate) fn remove_block_by_hash(&mut self, block_hash: &B256) -> Option> { let number = self.hash_to_number.remove(block_hash)?; let (_, block) = self.blocks.remove(&number)?; self.remove_tx_index_for_block(&block); @@ -216,7 +222,7 @@ impl ConfirmCache { /// /// Called when the canonical chain catches up to the confirmed cache. Returns /// the number of entries flushed. - pub fn flush_up_to_height(&mut self, canon_height: u64) -> usize { + pub(crate) fn flush_up_to_height(&mut self, canon_height: u64) -> usize { let retained = self.blocks.split_off(&(canon_height + 1)); let stale = std::mem::replace(&mut self.blocks, retained); let count = stale.len(); diff --git a/crates/flashblocks/src/cache/mod.rs b/crates/flashblocks/src/cache/mod.rs index 17b99eb7..6683ae72 100644 --- a/crates/flashblocks/src/cache/mod.rs +++ b/crates/flashblocks/src/cache/mod.rs @@ -3,9 +3,10 @@ pub mod pending; pub(crate) mod raw; pub(crate) mod utils; -pub use confirm::ConfirmCache; +pub(crate) use confirm::ConfirmCache; +pub(crate) use raw::RawFlashblocksCache; + pub use pending::PendingSequence; -pub use raw::RawFlashblocksCache; use crate::{FlashblockCachedReceipt, PendingSequenceRx}; use parking_lot::RwLock; @@ -86,6 +87,11 @@ where self.inner.read().pending_cache.as_ref().map(|p| p.get_height()) } + /// Returns a clone of the current pending sequence, if any. + pub fn get_pending_sequence(&self) -> Option> { + self.inner.read().pending_cache.clone() + } + pub fn get_rpc_block_by_id(&self, block_id: Option) -> Option> { match block_id.unwrap_or(BlockId::Number(BlockNumberOrTag::Latest)) { BlockId::Number(id) => self.get_rpc_block(id), @@ -163,6 +169,18 @@ where let in_memory = guard.get_state_provider_at_height(block_num, canonical_state)?; Some((in_memory, block.clone_sealed_header())) } + + pub fn get_state_provider_by_hash( + &self, + block_hash: B256, + canonical_state: StateProviderBox, + ) -> Option<(StateProviderBox, SealedHeaderFor)> { + let mut guard = self.inner.write(); + let block = guard.get_block_by_hash(&block_hash)?.block; + let block_num = block.number(); + let in_memory = guard.get_state_provider_at_height(block_num, canonical_state)?; + Some((in_memory, block.clone_sealed_header())) + } } // FlashblockStateCache state mutation interfaces. diff --git a/crates/flashblocks/src/cache/pending.rs b/crates/flashblocks/src/cache/pending.rs index 27d49d00..43408424 100644 --- a/crates/flashblocks/src/cache/pending.rs +++ b/crates/flashblocks/src/cache/pending.rs @@ -28,14 +28,6 @@ where pub parent_hash: B256, /// The last flashblock index of the latest flashblocks sequence. pub last_flashblock_index: u64, - /// Cached number of transactions covered by the pending sequence execution. - cached_tx_count: usize, - /// Cached receipts for the prefix. - pub cached_receipts: Vec, - /// Total gas used by the pending sequence. - pub cached_gas_used: u64, - /// Total blob/DA gas used by the pending sequence. - pub cached_blob_gas_used: u64, } impl PendingSequence @@ -89,10 +81,6 @@ mod tests { block_hash, parent_hash, last_flashblock_index: 0, - cached_tx_count: 0, - cached_receipts: vec![], - cached_gas_used: 0, - cached_blob_gas_used: 0, } } @@ -127,10 +115,6 @@ mod tests { block_hash, parent_hash, last_flashblock_index: 0, - cached_tx_count: 0, - cached_receipts: vec![], - cached_gas_used: 0, - cached_blob_gas_used: 0, } } diff --git a/crates/flashblocks/src/execution/mod.rs b/crates/flashblocks/src/execution/mod.rs index 70904c40..89adac12 100644 --- a/crates/flashblocks/src/execution/mod.rs +++ b/crates/flashblocks/src/execution/mod.rs @@ -10,7 +10,7 @@ pub(crate) struct BuildArgs { } /// Receipt requirements for cache-resume flow. -pub(crate) trait FlashblockCachedReceipt: Clone { +pub trait FlashblockCachedReceipt: Clone { /// Adds `gas_offset` to each receipt's `cumulative_gas_used`. fn add_cumulative_gas_offset(receipts: &mut [Self], gas_offset: u64); } @@ -20,7 +20,6 @@ impl FlashblockCachedReceipt for OpReceipt { if gas_offset == 0 { return; } - for receipt in receipts { let inner = receipt.as_receipt_mut(); inner.cumulative_gas_used = inner.cumulative_gas_used.saturating_add(gas_offset); diff --git a/crates/flashblocks/src/execution/worker.rs b/crates/flashblocks/src/execution/worker.rs index 973bc419..5f7d9e05 100644 --- a/crates/flashblocks/src/execution/worker.rs +++ b/crates/flashblocks/src/execution/worker.rs @@ -1,16 +1,16 @@ use crate::{ - cache::{FlashblockStateCache, PendingSequence}, - BuildArgs, FlashblockCachedReceipt, + cache::{CachedTxInfo, FlashblockStateCache, PendingSequence}, + execution::BuildArgs, + FlashblockCachedReceipt, }; +use alloy_eips::eip2718::WithEncoded; +use op_alloy_rpc_types_engine::OpFlashblockPayloadBase; use std::{ + collections::HashMap, sync::Arc, time::{Duration, Instant}, }; -use tracing::trace; - -use alloy_eips::{eip2718::WithEncoded, BlockNumberOrTag}; -use alloy_primitives::B256; -use op_alloy_rpc_types_engine::OpFlashblockPayloadBase; +use tracing::*; use reth_chain_state::{ComputedTrieData, ExecutedBlock}; use reth_errors::RethError; @@ -22,22 +22,26 @@ use reth_evm::{ }; use reth_execution_types::{BlockExecutionOutput, BlockExecutionResult}; use reth_primitives_traits::{ - transaction::TxHashRef, AlloyBlockHeader, BlockTy, HeaderTy, NodePrimitives, ReceiptTy, - Recovered, RecoveredBlock, SealedHeader, + transaction::TxHashRef, BlockBody, HeaderTy, NodePrimitives, Recovered, RecoveredBlock, }; use reth_revm::{ + cached::CachedReads, database::StateProviderDatabase, db::{states::bundle_state::BundleRetention, BundleState, State}, }; -use reth_rpc_eth_types::{EthApiError, PendingBlock}; +use reth_rpc_eth_types::PendingBlock; use reth_storage_api::{ - noop::NoopProvider, BlockReaderIdExt, HashedPostStateProvider, StateProviderFactory, - StateRootProvider, + HashedPostStateProvider, HeaderProvider, StateProviderFactory, StateRootProvider, }; +use reth_trie_common::HashedPostState; -/// The `FlashblocksValidator` builds [`PendingBlock`] out of a sequence of transactions. +/// Builds the [`PendingSequence`]s from the accumulated flashblock transaction sequences. +/// Commits results directly to [`FlashblockStateCache`] via `handle_pending_sequence()`. /// -/// Owns a [`TransactionCache`] for incremental prefix caching between flashblock builds. +/// Supports two execution modes: +/// - **Fresh**: Full execution for a new block height. +/// - **Incremental**: Suffix-only execution reusing cached prefix state from an existing +/// pending sequence at the same height. #[derive(Debug)] pub(crate) struct FlashblockSequenceValidator where @@ -48,7 +52,6 @@ where /// The canonical chainstate provider. provider: Provider, /// The flashblocks state cache containing the flashblocks state cache layer. - /// state cache layer. flashblocks_state: FlashblockStateCache, } @@ -74,562 +77,320 @@ where N: NodePrimitives, N::Receipt: FlashblockCachedReceipt, EvmConfig: ConfigureEvm + Unpin>, - Provider: StateProviderFactory - + BlockReaderIdExt< - Header = HeaderTy, - Block = BlockTy, - Transaction = N::SignedTx, - Receipt = ReceiptTy, - > + Unpin, + Provider: StateProviderFactory + HeaderProvider
> + Unpin, { - /// Returns the [`PendingSequence`], which contains the full built execution state of - /// the flashblocks sequence passed in `BuildArgs`. - /// - /// The + /// Executes a flashblock transaction sequence and commits the result to the flashblocks + /// state cache. Note that the flashblocks sequence validator should be the only handle + /// that advances the flashblocks state cache tip. /// - /// In canonical mode, the internal transaction cache is used to resume from - /// cached state if the transaction list is a continuation of what was previously - /// executed. - /// - /// Returns `None` if: - /// - In canonical mode: flashblock doesn't attach to the latest header - /// - In speculative mode: no pending parent state provided + /// Determines execution mode from the current pending state: + /// - No pending sequence exists, cache not yet initialized → fresh build. + /// - Pending is at a different height → fresh build. + /// - If pending exists at the same height → incremental build. pub(crate) fn execute>>>( &mut self, - mut args: BuildArgs, + args: BuildArgs, ) -> eyre::Result<()> { - trace!(target: "flashblocks", "Attempting new pending block from flashblocks"); - - let parent_hash = args.base.parent_hash; - let parent_header = self.state_cache.latest_header(parent_hash)?; - let state_provider = self.state_cache.history_by_block_hash(parent_header.hash())?; - - let latest = self - .provider - .latest_header()? - .ok_or(EthApiError::HeaderNotFound(BlockNumberOrTag::Latest.into()))?; - let latest_hash = latest.hash(); + let block_number = args.base.block_number; + let transactions: Vec<_> = args.transactions.into_iter().collect(); - // Determine build mode: canonical (parent is local tip) or speculative (parent is pending) - let is_canonical = args.base.parent_hash == latest_hash; - let has_pending_parent = args.pending_parent.is_some(); + // Determine execution mode from pending state + let pending = self.flashblocks_state.get_pending_sequence(); + let pending_height = pending.as_ref().map(|p| p.get_height()); + let incremental = pending_height == Some(block_number); - if !is_canonical && !has_pending_parent { - trace!( + // Validate height continuity + if let Some(pending_height) = pending_height + && block_number != pending_height + && block_number != pending_height + 1 + { + // State cache is polluted + warn!( target: "flashblocks", - flashblock_parent = ?args.base.parent_hash, - local_latest = ?latest.num_hash(), - "Skipping non-consecutive flashblock (no pending parent available)" + incoming_height = block_number, + pending_height = pending_height, + "state mismatch from incoming sequence to current pending tip", ); - return Ok(None); + return Err(eyre::eyre!( + "state mismatch from incoming sequence to current pending tip" + )); } - // Collect transactions and extract hashes for cache lookup - let transactions: Vec<_> = args.transactions.into_iter().collect(); - let tx_hashes: Vec = transactions.iter().map(|tx| *tx.tx_hash()).collect(); - - // Get state provider and parent header context. - // For speculative builds, use the canonical anchor hash (not the pending parent hash) - // for storage reads, but execute with the pending parent's sealed header context. - let (state_provider, canonical_anchor, parent_header) = if is_canonical { - (self.provider.history_by_block_hash(latest.hash())?, latest.hash(), &latest) - } else { - // For speculative building, we need to use the canonical anchor - // and apply the pending state's bundle on top of it - let pending = args.pending_parent.as_ref().unwrap(); - let Some(parent_header) = pending.sealed_header.as_ref() else { - trace!( - target: "flashblocks", - pending_block_number = pending.block_number, - pending_block_hash = ?pending.block_hash, - "Skipping speculative build: pending parent header is unavailable" - ); - return Ok(None); - }; - if !is_consistent_speculative_parent_hashes( - args.base.parent_hash, - pending.block_hash, - parent_header.hash(), - ) { - trace!( - target: "flashblocks", - incoming_parent_hash = ?args.base.parent_hash, - pending_block_hash = ?pending.block_hash, - pending_sealed_hash = ?parent_header.hash(), - pending_block_number = pending.block_number, - "Skipping speculative build: inconsistent pending parent hashes" - ); - return Ok(None); - } - trace!( - target: "flashblocks", - pending_block_number = pending.block_number, - pending_block_hash = ?pending.block_hash, - canonical_anchor = ?pending.canonical_anchor_hash, - "Building speculatively on pending state" - ); - ( - self.provider.history_by_block_hash(pending.canonical_anchor_hash)?, - pending.canonical_anchor_hash, - parent_header, + if incremental { + self.execute_incremental( + args.base, + transactions, + args.last_flashblock_index, + pending.unwrap(), ) - }; - - // Set up cached reads - let cache_key = if is_canonical { latest_hash } else { args.base.parent_hash }; - let mut request_cache = args - .cached_state - .take() - .filter(|(hash, _)| hash == &cache_key) - .map(|(_, state)| state) - .unwrap_or_else(|| { - // For speculative builds, use cached reads from pending parent - args.pending_parent.as_ref().map(|p| p.cached_reads.clone()).unwrap_or_default() - }); - - let cached_db = request_cache.as_db_mut(StateProviderDatabase::new(&state_provider)); - - // Check for resumable canonical execution state. - let canonical_parent_hash = args.base.parent_hash; - let cached_prefix = if is_canonical { - self.tx_cache - .get_resumable_state_with_execution_meta_for_parent( - args.base.block_number, - canonical_parent_hash, - &tx_hashes, - ) - .map(|(bundle, receipts, _requests, gas_used, blob_gas_used, cached_tx_count)| { - trace!( - target: "flashblocks", - cached_tx_count, - total_txs = tx_hashes.len(), - "Cache hit (executing only uncached suffix)" - ); - CachedPrefixExecutionResult { - cached_tx_count, - bundle: bundle.clone(), - receipts: receipts.to_vec(), - gas_used, - blob_gas_used, - } - }) } else { - None - }; - - // Build state with appropriate prestate - // - Speculative builds use pending parent prestate - // - Canonical cache-hit builds use cached prefix prestate - let mut state = if let Some(ref pending) = args.pending_parent { - State::builder() - .with_database(cached_db) - .with_bundle_prestate(pending.execution_outcome.state.clone()) - .with_bundle_update() - .build() - } else if let Some(ref cached_prefix) = cached_prefix { - State::builder() - .with_database(cached_db) - .with_bundle_prestate(cached_prefix.bundle.clone()) - .with_bundle_update() - .build() - } else { - State::builder().with_database(cached_db).with_bundle_update().build() - }; + self.execute_fresh(args.base, transactions, args.last_flashblock_index) + } + } - let (execution_result, block, hashed_state, bundle) = if let Some(cached_prefix) = - cached_prefix + /// Full flashblocks sequence execution from a new block height. + fn execute_fresh( + &self, + base: OpFlashblockPayloadBase, + transactions: Vec>>, + last_flashblock_index: u64, + ) -> eyre::Result<()> { + let parent_hash = base.parent_hash; + + // Prioritize trying to get parent hash state from canonical provider first. If + // the parent is not in the canonical chain, then try building fresh on top of + // the current pending sequence (current pending promoted to confirm, incoming + // sequence is the next height). Fall back to the flashblocks overlay via + // `get_pending_state_provider`. + let (state_provider, parent_header) = match self.provider.history_by_block_hash(parent_hash) { - // Cached prefix execution model: - // - The cached bundle prestate already includes pre-execution state changes - // (blockhash/beacon root updates, create2deployer), so we do NOT call - // apply_pre_execution_changes() again. - // - The only pre-execution effect we need is set_state_clear_flag, which configures EVM - // empty-account handling (OP Stack chains activate Spurious Dragon at genesis, so - // this is always true). - // - Suffix transactions execute against the warm prestate. - // - Post-execution (finish()) runs once on the suffix executor, producing correct - // results for the full block. For OP Stack post-merge, the - // post_block_balance_increments are empty (no block rewards, no ommers, no - // withdrawals passed), so finish() only seals execution state. - let attrs = args.base.clone().into(); - let evm_env = - self.evm_config.next_evm_env(parent_header, &attrs).map_err(RethError::other)?; - let execution_ctx = self - .evm_config - .context_for_next_block(parent_header, attrs) - .map_err(RethError::other)?; - - // The cached bundle prestate already includes pre-execution state changes. - // Only set the state clear flag (Spurious Dragon empty-account handling). - state.set_state_clear_flag(true); - let evm = self.evm_config.evm_with_env(&mut state, evm_env); - let mut executor = self.evm_config.create_executor(evm, execution_ctx.clone()); - - for tx in transactions.iter().skip(cached_prefix.cached_tx_count).cloned() { - let _gas_used = executor.execute_transaction(tx)?; + Ok(canon_provider) => { + let header = self + .provider + .sealed_header_by_hash(parent_hash)? + .ok_or_else(|| eyre::eyre!("parent header not found for hash {parent_hash}"))?; + (canon_provider, header) } - - let (evm, suffix_execution_result) = executor.finish()?; - let (db, evm_env) = evm.finish(); - db.merge_transitions(BundleRetention::Reverts); - - let execution_result = - Self::merge_cached_and_suffix_results(cached_prefix, suffix_execution_result); - - let (hashed_state, state_root) = if args.compute_state_root { - trace!(target: "flashblocks", "Computing block state root"); - let hashed_state = state_provider.hashed_post_state(&db.bundle_state); - let (state_root, _) = state_provider - .state_root_with_updates(hashed_state.clone()) - .map_err(RethError::other)?; - (hashed_state, state_root) - } else { - let noop_provider = NoopProvider::default(); - let hashed_state = noop_provider.hashed_post_state(&db.bundle_state); - let (state_root, _) = noop_provider - .state_root_with_updates(hashed_state.clone()) - .map_err(RethError::other)?; - (hashed_state, state_root) - }; - let bundle = db.take_bundle(); - - let (block_transactions, senders): (Vec<_>, Vec<_>) = - transactions.iter().map(|tx| tx.1.clone().into_parts()).unzip(); - let block = self - .evm_config - .block_assembler() - .assemble_block(BlockAssemblerInput::new( - evm_env, - execution_ctx, - parent_header, - block_transactions, - &execution_result, - &bundle, - &state_provider, - state_root, - )) - .map_err(RethError::other)?; - let block = RecoveredBlock::new_unhashed(block, senders); - - (execution_result, block, hashed_state, bundle) - } else { - let mut builder = self - .evm_config - .builder_for_next_block(&mut state, parent_header, args.base.clone().into()) - .map_err(RethError::other)?; - - builder.apply_pre_execution_changes()?; - - for tx in transactions { - let _gas_used = builder.execute_transaction(tx)?; + Err(err) => { + trace!( + target: "flashblocks", + error = %err, + "parent not in canonical chain, try getting state from pending state", + ); + let canonical_state = self.provider.latest()?; + self.flashblocks_state.get_pending_state_provider(canonical_state).ok_or_else( + || { + eyre::eyre!( + "parent {parent_hash} not in canonical chain and no \ + pending state available for overlay" + ) + }, + )? } - - let BlockBuilderOutcome { execution_result, block, hashed_state, .. } = - if args.compute_state_root { - trace!(target: "flashblocks", "Computing block state root"); - builder.finish(&state_provider)? - } else { - builder.finish(NoopProvider::default())? - }; - let bundle = state.take_bundle(); - - (execution_result, block, hashed_state, bundle) }; - // Update internal transaction cache (only in canonical mode) - if is_canonical { - self.tx_cache.update_with_execution_meta_for_parent( - args.base.block_number, - canonical_parent_hash, - tx_hashes, - bundle.clone(), - execution_result.receipts.clone(), - CachedExecutionMeta { - requests: execution_result.requests.clone(), - gas_used: execution_result.gas_used, - blob_gas_used: execution_result.blob_gas_used, - }, - ); + let mut request_cache = CachedReads::default(); + let cached_db = + request_cache.as_db_mut(StateProviderDatabase::new(state_provider.as_ref())); + let mut state = State::builder().with_database(cached_db).with_bundle_update().build(); + + let mut builder = self + .evm_config + .builder_for_next_block(&mut state, &parent_header, base.clone().into()) + .map_err(RethError::other)?; + builder.apply_pre_execution_changes()?; + for tx in &transactions { + builder.execute_transaction(tx.clone())?; } + let BlockBuilderOutcome { execution_result, block, hashed_state, .. } = + builder.finish(state_provider.as_ref())?; + let bundle = state.take_bundle(); + + self.commit_pending_sequence( + base, + transactions, + last_flashblock_index, + execution_result, + block, + hashed_state, + bundle, + request_cache, + ) + } - let execution_outcome = BlockExecutionOutput { state: bundle, result: execution_result }; - let execution_outcome = Arc::new(execution_outcome); - - // Create pending state for subsequent builds. - // Use the locally built block hash for both parent matching and speculative - // execution context to avoid split-hash ambiguity. - let local_block_hash = block.hash(); - if local_block_hash != args.last_flashblock_hash { - trace!( + /// Incremental execution for the same block height as the current pending. Reuses + /// the pending sequence's `BundleState` as prestate and its warm `CachedReads`, + /// executing only new unexecuted transactions from incremental flashblock payloads. + fn execute_incremental( + &self, + base: OpFlashblockPayloadBase, + transactions: Vec>>, + last_flashblock_index: u64, + pending: PendingSequence, + ) -> eyre::Result<()> { + if pending.last_flashblock_index != last_flashblock_index { + // State cache is polluted + warn!( target: "flashblocks", - local_block_hash = ?local_block_hash, - sequencer_block_hash = ?args.last_flashblock_hash, - block_number = block.number(), - "Local block hash differs from sequencer-provided hash; speculative chaining will follow local hash" + incoming_last_flashblock_index = last_flashblock_index, + pending_last_flashblock_index = pending.last_flashblock_index, + "state mismatch from incoming sequence to current pending tip", ); + return Err(eyre::eyre!( + "state mismatch, last flashblock index mismatch pending index" + )); } - let sealed_header = SealedHeader::new(block.header().clone(), local_block_hash); - let pending_state = PendingBlockState::new( - local_block_hash, - block.number(), - args.base.parent_hash, - canonical_anchor, - execution_outcome.clone(), - request_cache.clone(), + + // Get latest canonical state, then overlay flashblocks state cache blocks + // from canonical height up to the parent hash. This handles the case where + // the parent is a flashblocks-confirmed block ahead of canonical. + let parent_hash = base.parent_hash; + let canonical_state = self.provider.latest()?; + let (state_provider, parent_header) = self + .flashblocks_state + .get_state_provider_by_hash(parent_hash, canonical_state) + .ok_or_else(|| { + eyre::eyre!("failed to build overlay state provider for parent {parent_hash}") + })?; + + // Extract prestate from current pending + let exec_output = &pending.pending.executed_block.execution_output; + let prestate_bundle = exec_output.state.clone(); + let cached_tx_count = + pending.pending.executed_block.recovered_block.body().transaction_count(); + let cached_receipts = exec_output.result.receipts.clone(); + let cached_gas_used = exec_output.result.gas_used; + let cached_blob_gas_used = exec_output.result.blob_gas_used; + + // Set up state DB with pending's warm CachedReads + prestate bundle + let mut request_cache = pending.cached_reads; + let cached_db = + request_cache.as_db_mut(StateProviderDatabase::new(state_provider.as_ref())); + let mut state = State::builder() + .with_database(cached_db) + .with_bundle_prestate(prestate_bundle) + .with_bundle_update() + .build(); + + let attrs = base.clone().into(); + let evm_env = + self.evm_config.next_evm_env(&parent_header, &attrs).map_err(RethError::other)?; + let execution_ctx = self + .evm_config + .context_for_next_block(&parent_header, attrs) + .map_err(RethError::other)?; + + // Skip apply_pre_execution_changes (already applied in the original fresh build). + // The only pre-execution effect we need is set_state_clear_flag, which configures EVM + // empty-account handling (OP Stack chains activate Spurious Dragon at genesis, so + // this is always true). + state.set_state_clear_flag(true); + let evm = self.evm_config.evm_with_env(&mut state, evm_env); + let mut executor = self.evm_config.create_executor(evm, execution_ctx.clone()); + + for tx in transactions.iter().skip(cached_tx_count).cloned() { + executor.execute_transaction(tx)?; + } + + let (evm, execution_result) = executor.finish()?; + let (db, evm_env) = evm.finish(); + db.merge_transitions(BundleRetention::Reverts); + + let execution_result = Self::merge_cached_block_execution_results( + cached_receipts, + cached_gas_used, + cached_blob_gas_used, + execution_result, + ); + + // Compute state root via sparse trie + let hashed_state = state_provider.hashed_post_state(&db.bundle_state); + let (state_root, _) = state_provider + .state_root_with_updates(hashed_state.clone()) + .map_err(RethError::other)?; + let bundle = db.take_bundle(); + + // Assemble block + let (block_transactions, senders): (Vec<_>, Vec<_>) = + transactions.iter().map(|tx| tx.1.clone().into_parts()).unzip(); + let block = self + .evm_config + .block_assembler() + .assemble_block(BlockAssemblerInput::new( + evm_env, + execution_ctx, + &parent_header, + block_transactions, + &execution_result, + &bundle, + state_provider.as_ref(), + state_root, + )) + .map_err(RethError::other)?; + let block = RecoveredBlock::new_unhashed(block, senders); + + self.commit_pending_sequence( + base, + transactions, + last_flashblock_index, + execution_result, + block, + hashed_state, + bundle, + request_cache, ) - .with_sealed_header(sealed_header); + } - let pending_block = PendingBlock::with_executed_block( - Instant::now() + Duration::from_secs(1), - ExecutedBlock::new( - block.into(), - execution_outcome, - ComputedTrieData::without_trie_input( - Arc::new(hashed_state.into_sorted()), - Arc::default(), - ), + /// Builds a [`PendingSequence`] and commits it to the flashblocks state cache. + #[expect(clippy::too_many_arguments)] + fn commit_pending_sequence( + &self, + base: OpFlashblockPayloadBase, + transactions: Vec>>, + last_flashblock_index: u64, + execution_result: BlockExecutionResult, + block: RecoveredBlock, + hashed_state: HashedPostState, + bundle: BundleState, + request_cache: CachedReads, + ) -> eyre::Result<()> { + let block_hash = block.hash(); + let parent_hash = base.parent_hash; + + // Build pending execution block + let execution_outcome = + Arc::new(BlockExecutionOutput { state: bundle, result: execution_result }); + let executed_block = ExecutedBlock::new( + block.into(), + execution_outcome.clone(), + ComputedTrieData::without_trie_input( + Arc::new(hashed_state.into_sorted()), + Arc::default(), ), ); - let pending_flashblock = PendingFlashBlock::new( - pending_block, - canonical_anchor, - args.last_flashblock_index, - args.last_flashblock_hash, - args.compute_state_root, + let pending_block = PendingBlock::with_executed_block( + Instant::now() + Duration::from_secs(1), + executed_block, ); - Ok(Some(BuildResult { pending_flashblock, cached_reads: request_cache, pending_state })) + // Build tx index + let mut tx_index = HashMap::with_capacity(transactions.len()); + for (idx, tx) in transactions.iter().enumerate() { + tx_index.insert( + *tx.tx_hash(), + CachedTxInfo { + block_number: base.block_number, + block_hash, + tx_index: idx as u64, + tx: tx.1.clone().into_inner(), + receipt: execution_outcome.result.receipts[idx].clone(), + }, + ); + } + self.flashblocks_state.handle_pending_sequence(PendingSequence { + pending: pending_block, + tx_index, + cached_reads: request_cache, + block_hash, + parent_hash, + last_flashblock_index, + }) } - fn merge_cached_and_suffix_results( - cached_prefix: CachedPrefixExecutionResult, - mut suffix_result: BlockExecutionResult, + fn merge_cached_block_execution_results( + cached_receipts: Vec, + cached_gas_used: u64, + cached_blob_gas_used: u64, + mut execution_result: BlockExecutionResult, ) -> BlockExecutionResult { - N::Receipt::add_cumulative_gas_offset(&mut suffix_result.receipts, cached_prefix.gas_used); - - let mut receipts = cached_prefix.receipts; - receipts.extend(suffix_result.receipts); - - // Use only suffix requests: the suffix executor's finish() produces - // post-execution requests from the complete block state (cached prestate + - // suffix changes). The cached prefix requests came from an intermediate - // state and must not be merged. - let requests = suffix_result.requests; - + N::Receipt::add_cumulative_gas_offset(&mut execution_result.receipts, cached_gas_used); + let mut receipts = cached_receipts; + receipts.extend(execution_result.receipts); BlockExecutionResult { receipts, - requests, - gas_used: cached_prefix.gas_used.saturating_add(suffix_result.gas_used), - blob_gas_used: cached_prefix.blob_gas_used.saturating_add(suffix_result.blob_gas_used), + requests: execution_result.requests, + gas_used: cached_gas_used.saturating_add(execution_result.gas_used), + blob_gas_used: cached_blob_gas_used.saturating_add(execution_result.blob_gas_used), } } } - -#[inline] -fn is_consistent_speculative_parent_hashes( - incoming_parent_hash: B256, - pending_block_hash: B256, - pending_sealed_hash: B256, -) -> bool { - incoming_parent_hash == pending_block_hash && pending_block_hash == pending_sealed_hash -} - -// #[cfg(test)] -// mod tests { -// use super::{is_consistent_speculative_parent_hashes, BuildArgs, FlashBlockBuilder}; -// use crate::execution::cache::CachedExecutionMeta; -// use alloy_consensus::{SignableTransaction, TxEip1559}; -// use alloy_eips::eip2718::Encodable2718; -// use alloy_network::TxSignerSync; -// use alloy_primitives::{Address, StorageKey, StorageValue, TxKind, B256, U256}; -// use alloy_signer_local::PrivateKeySigner; -// use op_alloy_rpc_types_engine::OpFlashblockPayloadBase; -// use op_revm::constants::L1_BLOCK_CONTRACT; -// use reth_optimism_chainspec::OP_MAINNET; -// use reth_optimism_evm::OpEvmConfig; -// use reth_optimism_primitives::{OpBlock, OpPrimitives, OpTransactionSigned}; -// use reth_primitives_traits::{AlloyBlockHeader, Recovered, SignerRecoverable}; -// use reth_provider::test_utils::{ExtendedAccount, MockEthProvider}; -// use reth_provider::ChainSpecProvider; -// use reth_storage_api::BlockReaderIdExt; -// use std::str::FromStr; - -// fn signed_transfer_tx( -// signer: &PrivateKeySigner, -// nonce: u64, -// recipient: Address, -// ) -> OpTransactionSigned { -// let mut tx = TxEip1559 { -// chain_id: 10, // OP Mainnet chain id -// nonce, -// gas_limit: 100_000, -// max_priority_fee_per_gas: 1_000_000_000, -// max_fee_per_gas: 2_000_000_000, -// to: TxKind::Call(recipient), -// value: U256::from(1), -// ..Default::default() -// }; -// let signature = signer.sign_transaction_sync(&mut tx).expect("signing tx succeeds"); -// tx.into_signed(signature).into() -// } - -// fn into_encoded_recovered( -// tx: OpTransactionSigned, -// signer: Address, -// ) -> alloy_eips::eip2718::WithEncoded> { -// let encoded = tx.encoded_2718(); -// Recovered::new_unchecked(tx, signer).into_encoded_with(encoded) -// } - -// #[test] -// fn speculative_parent_hashes_must_all_match() { -// let h = B256::repeat_byte(0x11); -// assert!(is_consistent_speculative_parent_hashes(h, h, h)); -// } - -// #[test] -// fn speculative_parent_hashes_reject_any_mismatch() { -// let incoming = B256::repeat_byte(0x11); -// let pending = B256::repeat_byte(0x22); -// let sealed = B256::repeat_byte(0x33); - -// assert!(!is_consistent_speculative_parent_hashes(incoming, pending, sealed)); -// assert!(!is_consistent_speculative_parent_hashes(incoming, incoming, sealed)); -// assert!(!is_consistent_speculative_parent_hashes(incoming, pending, pending)); -// } - -// #[test] -// fn canonical_build_reuses_cached_prefix_execution() { -// let provider = MockEthProvider::::new().with_chain_spec(OP_MAINNET.clone()); -// let genesis_hash = provider.chain_spec().genesis_hash(); -// let genesis_block = -// OpBlock::new(provider.chain_spec().genesis_header().clone(), Default::default()); -// provider.add_block(genesis_hash, genesis_block); - -// let recipient = Address::repeat_byte(0x22); -// let signer = PrivateKeySigner::random(); -// let tx_a = signed_transfer_tx(&signer, 0, recipient); -// let tx_b = signed_transfer_tx(&signer, 1, recipient); -// let tx_c = signed_transfer_tx(&signer, 2, recipient); -// let signer = tx_a.recover_signer().expect("tx signer recovery succeeds"); - -// provider.add_account(signer, ExtendedAccount::new(0, U256::from(1_000_000_000_000_000u64))); -// provider.add_account(recipient, ExtendedAccount::new(0, U256::ZERO)); -// provider.add_account( -// L1_BLOCK_CONTRACT, -// ExtendedAccount::new(1, U256::ZERO).extend_storage([ -// (StorageKey::with_last_byte(1), StorageValue::from(1_000_000_000u64)), -// (StorageKey::with_last_byte(5), StorageValue::from(188u64)), -// (StorageKey::with_last_byte(6), StorageValue::from(684_000u64)), -// ( -// StorageKey::with_last_byte(3), -// StorageValue::from_str( -// "0x0000000000000000000000000000000000001db0000d27300000000000000005", -// ) -// .expect("valid L1 fee scalar storage value"), -// ), -// ]), -// ); - -// let latest = provider -// .latest_header() -// .expect("provider latest header query succeeds") -// .expect("genesis header exists"); - -// let base = OpFlashblockPayloadBase { -// parent_hash: latest.hash(), -// parent_beacon_block_root: B256::ZERO, -// fee_recipient: Address::ZERO, -// prev_randao: B256::repeat_byte(0x55), -// block_number: latest.number() + 1, -// gas_limit: 30_000_000, -// timestamp: latest.timestamp() + 2, -// extra_data: Default::default(), -// base_fee_per_gas: U256::from(1_000_000_000u64), -// }; -// let base_parent_hash = base.parent_hash; - -// let tx_a_hash = B256::from(*tx_a.tx_hash()); -// let tx_b_hash = B256::from(*tx_b.tx_hash()); -// let tx_c_hash = B256::from(*tx_c.tx_hash()); - -// let tx_a = into_encoded_recovered(tx_a, signer); -// let tx_b = into_encoded_recovered(tx_b, signer); -// let tx_c = into_encoded_recovered(tx_c, signer); - -// let evm_config = OpEvmConfig::optimism(OP_MAINNET.clone()); -// let mut builder = FlashBlockBuilder::::new(evm_config, provider); - -// let first = builder -// .execute(BuildArgs { -// base: base.clone(), -// transactions: vec![tx_a.clone(), tx_b.clone()], -// cached_state: None, -// last_flashblock_index: 0, -// last_flashblock_hash: B256::repeat_byte(0xA0), -// compute_state_root: false, -// pending_parent: None, -// }) -// .expect("first build succeeds") -// .expect("first build is canonical"); - -// assert_eq!(first.pending_state.execution_outcome.result.receipts.len(), 2); - -// let cached_hashes = vec![tx_a_hash, tx_b_hash]; -// let (bundle, receipts, requests, gas_used, blob_gas_used, skip) = builder -// .tx_cache -// .get_resumable_state_with_execution_meta_for_parent( -// base.block_number, -// base_parent_hash, -// &cached_hashes, -// ) -// .expect("cache should contain first build execution state"); -// assert_eq!(skip, 2); - -// let mut tampered_receipts = receipts.to_vec(); -// tampered_receipts[0].as_receipt_mut().cumulative_gas_used = -// tampered_receipts[0].as_receipt().cumulative_gas_used.saturating_add(17); -// let expected_tampered_gas = tampered_receipts[0].as_receipt().cumulative_gas_used; - -// builder.tx_cache.update_with_execution_meta_for_parent( -// base.block_number, -// base_parent_hash, -// cached_hashes, -// bundle.clone(), -// tampered_receipts, -// CachedExecutionMeta { requests: requests.clone(), gas_used, blob_gas_used }, -// ); - -// let second_hashes = vec![tx_a_hash, tx_b_hash, tx_c_hash]; -// let (_, _, _, _, _, skip) = builder -// .tx_cache -// .get_resumable_state_with_execution_meta_for_parent( -// base.block_number, -// base_parent_hash, -// &second_hashes, -// ) -// .expect("second tx list should extend cached prefix"); -// assert_eq!(skip, 2); - -// let second = builder -// .execute(BuildArgs { -// base, -// transactions: vec![tx_a, tx_b, tx_c], -// cached_state: None, -// last_flashblock_index: 1, -// last_flashblock_hash: B256::repeat_byte(0xA1), -// compute_state_root: false, -// pending_parent: None, -// }) -// .expect("second build succeeds") -// .expect("second build is canonical"); - -// let receipts = &second.pending_state.execution_outcome.result.receipts; -// assert_eq!(receipts.len(), 3); -// assert_eq!(receipts[0].as_receipt().cumulative_gas_used, expected_tampered_gas); -// assert!( -// receipts[2].as_receipt().cumulative_gas_used -// > receipts[1].as_receipt().cumulative_gas_used -// ); -// } -// } diff --git a/crates/flashblocks/src/lib.rs b/crates/flashblocks/src/lib.rs index 034656f7..e2189344 100644 --- a/crates/flashblocks/src/lib.rs +++ b/crates/flashblocks/src/lib.rs @@ -11,7 +11,7 @@ mod ws; mod test_utils; pub use cache::{CachedTxInfo, FlashblockStateCache, PendingSequence}; -pub(crate) use execution::{BuildArgs, FlashblockCachedReceipt}; +pub use execution::FlashblockCachedReceipt; pub use service::FlashblocksRpcService; pub use subscription::FlashblocksPubSub; pub use ws::WsFlashBlockStream; From f2d37526175523cdaba87d8fe47b626add59386a Mon Sep 17 00:00:00 2001 From: Niven Date: Mon, 16 Mar 2026 18:11:45 +0800 Subject: [PATCH 32/76] feat: revamp worker, split into processor and validator MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude Opus 4.6 --- Cargo.lock | 1 + crates/flashblocks/src/execution/mod.rs | 15 +- crates/flashblocks/src/execution/processor.rs | 216 ++++++++++ crates/flashblocks/src/execution/validator.rs | 292 +++++++++++++ crates/flashblocks/src/execution/worker.rs | 396 ------------------ crates/rpc/Cargo.toml | 1 + 6 files changed, 524 insertions(+), 397 deletions(-) create mode 100644 crates/flashblocks/src/execution/processor.rs create mode 100644 crates/flashblocks/src/execution/validator.rs delete mode 100644 crates/flashblocks/src/execution/worker.rs diff --git a/Cargo.lock b/Cargo.lock index 7be4e612..60bf1f88 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -14352,6 +14352,7 @@ dependencies = [ "jsonrpsee", "jsonrpsee-types", "op-alloy-network", + "op-alloy-rpc-types", "reth-chain-state", "reth-optimism-primitives", "reth-optimism-rpc", diff --git a/crates/flashblocks/src/execution/mod.rs b/crates/flashblocks/src/execution/mod.rs index 89adac12..228b9576 100644 --- a/crates/flashblocks/src/execution/mod.rs +++ b/crates/flashblocks/src/execution/mod.rs @@ -1,4 +1,5 @@ -pub(crate) mod worker; +pub(crate) mod processor; +pub(crate) mod validator; use op_alloy_rpc_types_engine::OpFlashblockPayloadBase; use reth_optimism_primitives::OpReceipt; @@ -9,6 +10,18 @@ pub(crate) struct BuildArgs { pub(crate) last_flashblock_index: u64, } +/// State root strategies during flashblocks sequence validation. +#[derive(Debug, Clone, Copy, Default)] +pub(crate) enum StateRootStrategy { + /// Synchronous state root computation + #[default] + Synchronous, + /// Parallel state root computation + Parallel, + /// Sparse trie task + SparseTrieTask, +} + /// Receipt requirements for cache-resume flow. pub trait FlashblockCachedReceipt: Clone { /// Adds `gas_offset` to each receipt's `cumulative_gas_used`. diff --git a/crates/flashblocks/src/execution/processor.rs b/crates/flashblocks/src/execution/processor.rs new file mode 100644 index 00000000..eb0f54eb --- /dev/null +++ b/crates/flashblocks/src/execution/processor.rs @@ -0,0 +1,216 @@ +use crate::{execution::StateRootStrategy, FlashblockCachedReceipt}; +use alloy_eips::eip2718::WithEncoded; +use op_alloy_rpc_types_engine::OpFlashblockPayloadBase; + +use reth_errors::RethError; +use reth_evm::{ + execute::{ + BlockAssembler, BlockAssemblerInput, BlockBuilder, BlockBuilderOutcome, BlockExecutor, + }, + ConfigureEvm, Evm, +}; +use reth_execution_types::BlockExecutionResult; +use reth_primitives_traits::{HeaderTy, NodePrimitives, Recovered, RecoveredBlock}; +use reth_revm::{ + cached::CachedReads, + database::StateProviderDatabase, + db::{states::bundle_state::BundleRetention, BundleState, State}, +}; +use reth_storage_api::StateProvider; +use reth_trie_common::HashedPostState; + +/// Data returned from processor to validator for cache commit. +pub(crate) struct ProcessorOutcome { + pub(crate) execution_result: BlockExecutionResult, + pub(crate) block: RecoveredBlock, + pub(crate) hashed_state: HashedPostState, + pub(crate) bundle: BundleState, + pub(crate) read_cache: CachedReads, +} + +/// Data extracted by validator from `PendingSequence`, passed to processor for incremental +/// execution. +pub(crate) struct IncrementalPrestate { + pub(crate) prestate_bundle: BundleState, + pub(crate) cached_tx_count: usize, + pub(crate) cached_receipts: Vec, + pub(crate) cached_gas_used: u64, + pub(crate) cached_blob_gas_used: u64, + pub(crate) cached_reads: CachedReads, +} + +/// Handles transaction execution, state root computation, and block assembly for flashblock +/// sequences. +/// +/// Separated from [`super::validator::FlashblockSequenceValidator`] so that configurable state +/// root strategies (`Synchronous`, `Parallel`, `SparseTrieTask`) live cleanly here, while the +/// validator handles flashblocks-specific cache orchestration. +#[derive(Debug)] +pub(crate) struct FlashblockSequenceProcessor { + evm_config: EvmConfig, + state_root_strategy: StateRootStrategy, + _primitives: std::marker::PhantomData, +} + +impl FlashblockSequenceProcessor +where + N: NodePrimitives, + N::Receipt: FlashblockCachedReceipt, +{ + pub(crate) fn new(evm_config: EvmConfig) -> Self { + Self { + evm_config, + state_root_strategy: StateRootStrategy::default(), + _primitives: std::marker::PhantomData, + } + } +} + +impl FlashblockSequenceProcessor +where + N: NodePrimitives, + N::Receipt: FlashblockCachedReceipt, + EvmConfig: ConfigureEvm + Unpin>, +{ + /// Full flashblocks sequence execution from scratch. + pub(crate) fn process_fresh( + &self, + base: &OpFlashblockPayloadBase, + transactions: &[WithEncoded>], + state_provider: &dyn StateProvider, + parent_header: &reth_primitives_traits::SealedHeader>, + ) -> eyre::Result> { + let mut read_cache = CachedReads::default(); + let cached_db = read_cache.as_db_mut(StateProviderDatabase::new(state_provider)); + let mut state = State::builder().with_database(cached_db).with_bundle_update().build(); + + let mut builder = self + .evm_config + .builder_for_next_block(&mut state, parent_header, base.clone().into()) + .map_err(RethError::other)?; + builder.apply_pre_execution_changes()?; + for tx in transactions { + builder.execute_transaction(tx.clone())?; + } + let BlockBuilderOutcome { execution_result, block, hashed_state, .. } = + builder.finish(state_provider)?; + let bundle = state.take_bundle(); + + Ok(ProcessorOutcome { execution_result, block, hashed_state, bundle, read_cache }) + } + + /// Suffix-only execution reusing cached prefix state from an existing pending sequence. + pub(crate) fn process_incremental( + &self, + base: &OpFlashblockPayloadBase, + transactions: &[WithEncoded>], + state_provider: &dyn StateProvider, + parent_header: &reth_primitives_traits::SealedHeader>, + prestate: IncrementalPrestate, + ) -> eyre::Result> { + let mut read_cache = prestate.cached_reads; + let cached_db = read_cache.as_db_mut(StateProviderDatabase::new(state_provider)); + let mut state = State::builder() + .with_database(cached_db) + .with_bundle_prestate(prestate.prestate_bundle) + .with_bundle_update() + .build(); + + let attrs = base.clone().into(); + let evm_env = + self.evm_config.next_evm_env(parent_header, &attrs).map_err(RethError::other)?; + let execution_ctx = self + .evm_config + .context_for_next_block(parent_header, attrs) + .map_err(RethError::other)?; + + // Skip `apply_pre_execution_changes` (already applied in the original fresh build). + // The only pre-execution effect we need is `set_state_clear_flag`, which configures + // EVM empty-account handling (OP Stack chains activate Spurious Dragon at genesis, so + // this is always true). + state.set_state_clear_flag(true); + let evm = self.evm_config.evm_with_env(&mut state, evm_env.clone()); + let mut executor = self.evm_config.create_executor(evm, execution_ctx.clone()); + + for tx in transactions.iter().skip(prestate.cached_tx_count).cloned() { + executor.execute_transaction(tx)?; + } + + let (evm, execution_result) = executor.finish()?; + let (db, _evm_env) = evm.finish(); + db.merge_transitions(BundleRetention::Reverts); + + let execution_result = merge_cached_block_execution_results::( + prestate.cached_receipts, + prestate.cached_gas_used, + prestate.cached_blob_gas_used, + execution_result, + ); + + // Compute state root + let hashed_state = state_provider.hashed_post_state(&db.bundle_state); + let (state_root, _) = self.compute_state_root(state_provider, &hashed_state)?; + let bundle = db.take_bundle(); + + // Assemble block + let (block_transactions, senders): (Vec<_>, Vec<_>) = + transactions.iter().map(|tx| tx.1.clone().into_parts()).unzip(); + let block = self + .evm_config + .block_assembler() + .assemble_block(BlockAssemblerInput::new( + evm_env, + execution_ctx, + parent_header, + block_transactions, + &execution_result, + &bundle, + state_provider, + state_root, + )) + .map_err(RethError::other)?; + let block = RecoveredBlock::new_unhashed(block, senders); + + Ok(ProcessorOutcome { execution_result, block, hashed_state, bundle, read_cache }) + } + + /// Dispatches state root computation based on the configured [`StateRootStrategy`]. + fn compute_state_root( + &self, + state_provider: &dyn StateProvider, + hashed_state: &HashedPostState, + ) -> eyre::Result<(alloy_primitives::B256, reth_trie_common::updates::TrieUpdates)> { + match self.state_root_strategy { + StateRootStrategy::Synchronous => state_provider + .state_root_with_updates(hashed_state.clone()) + .map_err(|e| eyre::eyre!(e)), + StateRootStrategy::Parallel | StateRootStrategy::SparseTrieTask => { + unimplemented!( + "Parallel and SparseTrieTask state root strategies are not yet implemented" + ) + } + } + } +} + +/// Merges prefix (cached) and suffix execution results into a single +/// [`BlockExecutionResult`]. +fn merge_cached_block_execution_results( + cached_receipts: Vec, + cached_gas_used: u64, + cached_blob_gas_used: u64, + mut execution_result: BlockExecutionResult, +) -> BlockExecutionResult +where + N::Receipt: FlashblockCachedReceipt, +{ + N::Receipt::add_cumulative_gas_offset(&mut execution_result.receipts, cached_gas_used); + let mut receipts = cached_receipts; + receipts.extend(execution_result.receipts); + BlockExecutionResult { + receipts, + requests: execution_result.requests, + gas_used: cached_gas_used.saturating_add(execution_result.gas_used), + blob_gas_used: cached_blob_gas_used.saturating_add(execution_result.blob_gas_used), + } +} diff --git a/crates/flashblocks/src/execution/validator.rs b/crates/flashblocks/src/execution/validator.rs new file mode 100644 index 00000000..42db3abf --- /dev/null +++ b/crates/flashblocks/src/execution/validator.rs @@ -0,0 +1,292 @@ +use crate::{ + cache::{CachedTxInfo, FlashblockStateCache, PendingSequence}, + execution::{ + processor::{FlashblockSequenceProcessor, IncrementalPrestate, ProcessorOutcome}, + BuildArgs, + }, + FlashblockCachedReceipt, +}; +use alloy_eips::eip2718::WithEncoded; +use op_alloy_rpc_types_engine::OpFlashblockPayloadBase; +use std::{ + collections::HashMap, + sync::Arc, + time::{Duration, Instant}, +}; +use tracing::*; + +use reth_chain_state::{ComputedTrieData, ExecutedBlock}; +use reth_evm::ConfigureEvm; +use reth_execution_types::BlockExecutionOutput; +use reth_primitives_traits::{ + transaction::TxHashRef, BlockBody, HeaderTy, NodePrimitives, Recovered, +}; +use reth_rpc_eth_types::PendingBlock; +use reth_storage_api::{HeaderProvider, StateProviderFactory}; + +/// Builds the [`PendingSequence`]s from the accumulated flashblock transaction sequences. +/// Commits results directly to [`FlashblockStateCache`] via `handle_pending_sequence()`. +/// +/// Supports two execution modes: +/// - **Fresh**: Full execution for a new block height. +/// - **Incremental**: Suffix-only execution reusing cached prefix state from an existing +/// pending sequence at the same height. +/// +/// Delegates transaction execution and block assembly to +/// [`FlashblockSequenceProcessor`]. +#[derive(Debug)] +pub(crate) struct FlashblockSequenceValidator +where + N::Receipt: FlashblockCachedReceipt, +{ + /// Handles transaction execution, state root computation, and block assembly. + processor: FlashblockSequenceProcessor, + /// The state provider factory for resolving canonical and historical state. + provider: Provider, + /// The flashblocks state cache containing the flashblocks state cache layer. + flashblocks_state: FlashblockStateCache, +} + +impl FlashblockSequenceValidator +where + N::Receipt: FlashblockCachedReceipt, +{ + pub(crate) fn new( + evm_config: EvmConfig, + provider: Provider, + flashblocks_state: FlashblockStateCache, + ) -> Self { + Self { + processor: FlashblockSequenceProcessor::new(evm_config), + provider, + flashblocks_state, + } + } + + pub(crate) const fn provider(&self) -> &Provider { + &self.provider + } +} + +impl FlashblockSequenceValidator +where + N: NodePrimitives, + N::Receipt: FlashblockCachedReceipt, + EvmConfig: ConfigureEvm + Unpin>, + Provider: StateProviderFactory + HeaderProvider
> + Unpin, +{ + /// Executes a flashblock transaction sequence and commits the result to the flashblocks + /// state cache. Note that the flashblocks sequence validator should be the only handle + /// that advances the flashblocks state cache tip. + /// + /// Determines execution mode from the current pending state: + /// - No pending sequence exists, cache not yet initialized → fresh build. + /// - Pending is at a different height → fresh build. + /// - If pending exists at the same height → incremental build. + pub(crate) fn execute>>>( + &mut self, + args: BuildArgs, + ) -> eyre::Result<()> { + let block_number = args.base.block_number; + let transactions: Vec<_> = args.transactions.into_iter().collect(); + + // Determine execution mode from pending state + let pending = self.flashblocks_state.get_pending_sequence(); + let pending_height = pending.as_ref().map(|p| p.get_height()); + let incremental = pending_height == Some(block_number); + + // Validate height continuity + if let Some(pending_height) = pending_height + && block_number != pending_height + && block_number != pending_height + 1 + { + warn!( + target: "flashblocks", + incoming_height = block_number, + pending_height = pending_height, + "state mismatch from incoming sequence to current pending tip", + ); + return Err(eyre::eyre!( + "state mismatch from incoming sequence to current pending tip" + )); + } + + if incremental { + self.execute_incremental( + args.base, + transactions, + args.last_flashblock_index, + pending.unwrap(), + ) + } else { + self.execute_fresh(args.base, transactions, args.last_flashblock_index) + } + } + + /// Full flashblocks sequence execution from a new block height. + /// + /// Resolves the state provider and parent header, then delegates execution to the + /// processor. + fn execute_fresh( + &self, + base: OpFlashblockPayloadBase, + transactions: Vec>>, + last_flashblock_index: u64, + ) -> eyre::Result<()> { + let parent_hash = base.parent_hash; + + // Prioritize trying to get parent hash state from canonical provider first. If + // the parent is not in the canonical chain, then try building fresh on top of + // the current pending sequence (current pending promoted to confirm, incoming + // sequence is the next height). Fall back to the flashblocks overlay via + // `get_pending_state_provider`. + let (state_provider, parent_header) = + match self.provider.history_by_block_hash(parent_hash) { + Ok(canon_provider) => { + let header = + self.provider.sealed_header_by_hash(parent_hash)?.ok_or_else(|| { + eyre::eyre!("parent header not found for hash {parent_hash}") + })?; + (canon_provider, header) + } + Err(err) => { + trace!( + target: "flashblocks", + error = %err, + "parent not in canonical chain, try getting state from pending state", + ); + let canonical_state = self.provider.latest()?; + self.flashblocks_state.get_pending_state_provider(canonical_state).ok_or_else( + || { + eyre::eyre!( + "parent {parent_hash} not in canonical chain and no \ + pending state available for overlay" + ) + }, + )? + } + }; + + let outcome = self.processor.process_fresh( + &base, + &transactions, + state_provider.as_ref(), + &parent_header, + )?; + + self.commit_pending_sequence(base, transactions, last_flashblock_index, outcome) + } + + /// Incremental execution for the same block height as the current pending. Reuses + /// the pending sequence's `BundleState` as prestate and its warm `CachedReads`, + /// executing only new unexecuted transactions from incremental flashblock payloads. + fn execute_incremental( + &self, + base: OpFlashblockPayloadBase, + transactions: Vec>>, + last_flashblock_index: u64, + pending: PendingSequence, + ) -> eyre::Result<()> { + if pending.last_flashblock_index != last_flashblock_index { + warn!( + target: "flashblocks", + incoming_last_flashblock_index = last_flashblock_index, + pending_last_flashblock_index = pending.last_flashblock_index, + "state mismatch from incoming sequence to current pending tip", + ); + return Err(eyre::eyre!( + "state mismatch, last flashblock index mismatch pending index" + )); + } + + // Get latest canonical state, then overlay flashblocks state cache blocks + // from canonical height up to the parent hash. + let parent_hash = base.parent_hash; + let canonical_state = self.provider.latest()?; + let (state_provider, parent_header) = self + .flashblocks_state + .get_state_provider_by_hash(parent_hash, canonical_state) + .ok_or_else(|| { + eyre::eyre!("failed to build overlay state provider for parent {parent_hash}") + })?; + + // Extract prestate from current pending + let exec_output = &pending.pending.executed_block.execution_output; + let prestate = IncrementalPrestate { + prestate_bundle: exec_output.state.clone(), + cached_tx_count: pending + .pending + .executed_block + .recovered_block + .body() + .transaction_count(), + cached_receipts: exec_output.result.receipts.clone(), + cached_gas_used: exec_output.result.gas_used, + cached_blob_gas_used: exec_output.result.blob_gas_used, + cached_reads: pending.cached_reads, + }; + + let outcome = self.processor.process_incremental( + &base, + &transactions, + state_provider.as_ref(), + &parent_header, + prestate, + )?; + + self.commit_pending_sequence(base, transactions, last_flashblock_index, outcome) + } + + /// Builds a [`PendingSequence`] from a [`ProcessorOutcome`] and commits it to the + /// flashblocks state cache. + fn commit_pending_sequence( + &self, + base: OpFlashblockPayloadBase, + transactions: Vec>>, + last_flashblock_index: u64, + outcome: ProcessorOutcome, + ) -> eyre::Result<()> { + let block_hash = outcome.block.hash(); + let parent_hash = base.parent_hash; + + let execution_outcome = Arc::new(BlockExecutionOutput { + state: outcome.bundle, + result: outcome.execution_result, + }); + let executed_block = ExecutedBlock::new( + outcome.block.into(), + execution_outcome.clone(), + ComputedTrieData::without_trie_input( + Arc::new(outcome.hashed_state.into_sorted()), + Arc::default(), + ), + ); + let pending_block = PendingBlock::with_executed_block( + Instant::now() + Duration::from_secs(1), + executed_block, + ); + + // Build tx index + let mut tx_index = HashMap::with_capacity(transactions.len()); + for (idx, tx) in transactions.iter().enumerate() { + tx_index.insert( + *tx.tx_hash(), + CachedTxInfo { + block_number: base.block_number, + block_hash, + tx_index: idx as u64, + tx: tx.1.clone().into_inner(), + receipt: execution_outcome.result.receipts[idx].clone(), + }, + ); + } + self.flashblocks_state.handle_pending_sequence(PendingSequence { + pending: pending_block, + tx_index, + cached_reads: outcome.read_cache, + block_hash, + parent_hash, + last_flashblock_index, + }) + } +} diff --git a/crates/flashblocks/src/execution/worker.rs b/crates/flashblocks/src/execution/worker.rs deleted file mode 100644 index 5f7d9e05..00000000 --- a/crates/flashblocks/src/execution/worker.rs +++ /dev/null @@ -1,396 +0,0 @@ -use crate::{ - cache::{CachedTxInfo, FlashblockStateCache, PendingSequence}, - execution::BuildArgs, - FlashblockCachedReceipt, -}; -use alloy_eips::eip2718::WithEncoded; -use op_alloy_rpc_types_engine::OpFlashblockPayloadBase; -use std::{ - collections::HashMap, - sync::Arc, - time::{Duration, Instant}, -}; -use tracing::*; - -use reth_chain_state::{ComputedTrieData, ExecutedBlock}; -use reth_errors::RethError; -use reth_evm::{ - execute::{ - BlockAssembler, BlockAssemblerInput, BlockBuilder, BlockBuilderOutcome, BlockExecutor, - }, - ConfigureEvm, Evm, -}; -use reth_execution_types::{BlockExecutionOutput, BlockExecutionResult}; -use reth_primitives_traits::{ - transaction::TxHashRef, BlockBody, HeaderTy, NodePrimitives, Recovered, RecoveredBlock, -}; -use reth_revm::{ - cached::CachedReads, - database::StateProviderDatabase, - db::{states::bundle_state::BundleRetention, BundleState, State}, -}; -use reth_rpc_eth_types::PendingBlock; -use reth_storage_api::{ - HashedPostStateProvider, HeaderProvider, StateProviderFactory, StateRootProvider, -}; -use reth_trie_common::HashedPostState; - -/// Builds the [`PendingSequence`]s from the accumulated flashblock transaction sequences. -/// Commits results directly to [`FlashblockStateCache`] via `handle_pending_sequence()`. -/// -/// Supports two execution modes: -/// - **Fresh**: Full execution for a new block height. -/// - **Incremental**: Suffix-only execution reusing cached prefix state from an existing -/// pending sequence at the same height. -#[derive(Debug)] -pub(crate) struct FlashblockSequenceValidator -where - N::Receipt: FlashblockCachedReceipt, -{ - /// The EVM configuration used to build the flashblocks. - evm_config: EvmConfig, - /// The canonical chainstate provider. - provider: Provider, - /// The flashblocks state cache containing the flashblocks state cache layer. - flashblocks_state: FlashblockStateCache, -} - -impl FlashblockSequenceValidator -where - N::Receipt: FlashblockCachedReceipt, -{ - pub(crate) fn new( - evm_config: EvmConfig, - provider: Provider, - flashblocks_state: FlashblockStateCache, - ) -> Self { - Self { evm_config, provider, flashblocks_state } - } - - pub(crate) const fn provider(&self) -> &Provider { - &self.provider - } -} - -impl FlashblockSequenceValidator -where - N: NodePrimitives, - N::Receipt: FlashblockCachedReceipt, - EvmConfig: ConfigureEvm + Unpin>, - Provider: StateProviderFactory + HeaderProvider
> + Unpin, -{ - /// Executes a flashblock transaction sequence and commits the result to the flashblocks - /// state cache. Note that the flashblocks sequence validator should be the only handle - /// that advances the flashblocks state cache tip. - /// - /// Determines execution mode from the current pending state: - /// - No pending sequence exists, cache not yet initialized → fresh build. - /// - Pending is at a different height → fresh build. - /// - If pending exists at the same height → incremental build. - pub(crate) fn execute>>>( - &mut self, - args: BuildArgs, - ) -> eyre::Result<()> { - let block_number = args.base.block_number; - let transactions: Vec<_> = args.transactions.into_iter().collect(); - - // Determine execution mode from pending state - let pending = self.flashblocks_state.get_pending_sequence(); - let pending_height = pending.as_ref().map(|p| p.get_height()); - let incremental = pending_height == Some(block_number); - - // Validate height continuity - if let Some(pending_height) = pending_height - && block_number != pending_height - && block_number != pending_height + 1 - { - // State cache is polluted - warn!( - target: "flashblocks", - incoming_height = block_number, - pending_height = pending_height, - "state mismatch from incoming sequence to current pending tip", - ); - return Err(eyre::eyre!( - "state mismatch from incoming sequence to current pending tip" - )); - } - - if incremental { - self.execute_incremental( - args.base, - transactions, - args.last_flashblock_index, - pending.unwrap(), - ) - } else { - self.execute_fresh(args.base, transactions, args.last_flashblock_index) - } - } - - /// Full flashblocks sequence execution from a new block height. - fn execute_fresh( - &self, - base: OpFlashblockPayloadBase, - transactions: Vec>>, - last_flashblock_index: u64, - ) -> eyre::Result<()> { - let parent_hash = base.parent_hash; - - // Prioritize trying to get parent hash state from canonical provider first. If - // the parent is not in the canonical chain, then try building fresh on top of - // the current pending sequence (current pending promoted to confirm, incoming - // sequence is the next height). Fall back to the flashblocks overlay via - // `get_pending_state_provider`. - let (state_provider, parent_header) = match self.provider.history_by_block_hash(parent_hash) - { - Ok(canon_provider) => { - let header = self - .provider - .sealed_header_by_hash(parent_hash)? - .ok_or_else(|| eyre::eyre!("parent header not found for hash {parent_hash}"))?; - (canon_provider, header) - } - Err(err) => { - trace!( - target: "flashblocks", - error = %err, - "parent not in canonical chain, try getting state from pending state", - ); - let canonical_state = self.provider.latest()?; - self.flashblocks_state.get_pending_state_provider(canonical_state).ok_or_else( - || { - eyre::eyre!( - "parent {parent_hash} not in canonical chain and no \ - pending state available for overlay" - ) - }, - )? - } - }; - - let mut request_cache = CachedReads::default(); - let cached_db = - request_cache.as_db_mut(StateProviderDatabase::new(state_provider.as_ref())); - let mut state = State::builder().with_database(cached_db).with_bundle_update().build(); - - let mut builder = self - .evm_config - .builder_for_next_block(&mut state, &parent_header, base.clone().into()) - .map_err(RethError::other)?; - builder.apply_pre_execution_changes()?; - for tx in &transactions { - builder.execute_transaction(tx.clone())?; - } - let BlockBuilderOutcome { execution_result, block, hashed_state, .. } = - builder.finish(state_provider.as_ref())?; - let bundle = state.take_bundle(); - - self.commit_pending_sequence( - base, - transactions, - last_flashblock_index, - execution_result, - block, - hashed_state, - bundle, - request_cache, - ) - } - - /// Incremental execution for the same block height as the current pending. Reuses - /// the pending sequence's `BundleState` as prestate and its warm `CachedReads`, - /// executing only new unexecuted transactions from incremental flashblock payloads. - fn execute_incremental( - &self, - base: OpFlashblockPayloadBase, - transactions: Vec>>, - last_flashblock_index: u64, - pending: PendingSequence, - ) -> eyre::Result<()> { - if pending.last_flashblock_index != last_flashblock_index { - // State cache is polluted - warn!( - target: "flashblocks", - incoming_last_flashblock_index = last_flashblock_index, - pending_last_flashblock_index = pending.last_flashblock_index, - "state mismatch from incoming sequence to current pending tip", - ); - return Err(eyre::eyre!( - "state mismatch, last flashblock index mismatch pending index" - )); - } - - // Get latest canonical state, then overlay flashblocks state cache blocks - // from canonical height up to the parent hash. This handles the case where - // the parent is a flashblocks-confirmed block ahead of canonical. - let parent_hash = base.parent_hash; - let canonical_state = self.provider.latest()?; - let (state_provider, parent_header) = self - .flashblocks_state - .get_state_provider_by_hash(parent_hash, canonical_state) - .ok_or_else(|| { - eyre::eyre!("failed to build overlay state provider for parent {parent_hash}") - })?; - - // Extract prestate from current pending - let exec_output = &pending.pending.executed_block.execution_output; - let prestate_bundle = exec_output.state.clone(); - let cached_tx_count = - pending.pending.executed_block.recovered_block.body().transaction_count(); - let cached_receipts = exec_output.result.receipts.clone(); - let cached_gas_used = exec_output.result.gas_used; - let cached_blob_gas_used = exec_output.result.blob_gas_used; - - // Set up state DB with pending's warm CachedReads + prestate bundle - let mut request_cache = pending.cached_reads; - let cached_db = - request_cache.as_db_mut(StateProviderDatabase::new(state_provider.as_ref())); - let mut state = State::builder() - .with_database(cached_db) - .with_bundle_prestate(prestate_bundle) - .with_bundle_update() - .build(); - - let attrs = base.clone().into(); - let evm_env = - self.evm_config.next_evm_env(&parent_header, &attrs).map_err(RethError::other)?; - let execution_ctx = self - .evm_config - .context_for_next_block(&parent_header, attrs) - .map_err(RethError::other)?; - - // Skip apply_pre_execution_changes (already applied in the original fresh build). - // The only pre-execution effect we need is set_state_clear_flag, which configures EVM - // empty-account handling (OP Stack chains activate Spurious Dragon at genesis, so - // this is always true). - state.set_state_clear_flag(true); - let evm = self.evm_config.evm_with_env(&mut state, evm_env); - let mut executor = self.evm_config.create_executor(evm, execution_ctx.clone()); - - for tx in transactions.iter().skip(cached_tx_count).cloned() { - executor.execute_transaction(tx)?; - } - - let (evm, execution_result) = executor.finish()?; - let (db, evm_env) = evm.finish(); - db.merge_transitions(BundleRetention::Reverts); - - let execution_result = Self::merge_cached_block_execution_results( - cached_receipts, - cached_gas_used, - cached_blob_gas_used, - execution_result, - ); - - // Compute state root via sparse trie - let hashed_state = state_provider.hashed_post_state(&db.bundle_state); - let (state_root, _) = state_provider - .state_root_with_updates(hashed_state.clone()) - .map_err(RethError::other)?; - let bundle = db.take_bundle(); - - // Assemble block - let (block_transactions, senders): (Vec<_>, Vec<_>) = - transactions.iter().map(|tx| tx.1.clone().into_parts()).unzip(); - let block = self - .evm_config - .block_assembler() - .assemble_block(BlockAssemblerInput::new( - evm_env, - execution_ctx, - &parent_header, - block_transactions, - &execution_result, - &bundle, - state_provider.as_ref(), - state_root, - )) - .map_err(RethError::other)?; - let block = RecoveredBlock::new_unhashed(block, senders); - - self.commit_pending_sequence( - base, - transactions, - last_flashblock_index, - execution_result, - block, - hashed_state, - bundle, - request_cache, - ) - } - - /// Builds a [`PendingSequence`] and commits it to the flashblocks state cache. - #[expect(clippy::too_many_arguments)] - fn commit_pending_sequence( - &self, - base: OpFlashblockPayloadBase, - transactions: Vec>>, - last_flashblock_index: u64, - execution_result: BlockExecutionResult, - block: RecoveredBlock, - hashed_state: HashedPostState, - bundle: BundleState, - request_cache: CachedReads, - ) -> eyre::Result<()> { - let block_hash = block.hash(); - let parent_hash = base.parent_hash; - - // Build pending execution block - let execution_outcome = - Arc::new(BlockExecutionOutput { state: bundle, result: execution_result }); - let executed_block = ExecutedBlock::new( - block.into(), - execution_outcome.clone(), - ComputedTrieData::without_trie_input( - Arc::new(hashed_state.into_sorted()), - Arc::default(), - ), - ); - let pending_block = PendingBlock::with_executed_block( - Instant::now() + Duration::from_secs(1), - executed_block, - ); - - // Build tx index - let mut tx_index = HashMap::with_capacity(transactions.len()); - for (idx, tx) in transactions.iter().enumerate() { - tx_index.insert( - *tx.tx_hash(), - CachedTxInfo { - block_number: base.block_number, - block_hash, - tx_index: idx as u64, - tx: tx.1.clone().into_inner(), - receipt: execution_outcome.result.receipts[idx].clone(), - }, - ); - } - self.flashblocks_state.handle_pending_sequence(PendingSequence { - pending: pending_block, - tx_index, - cached_reads: request_cache, - block_hash, - parent_hash, - last_flashblock_index, - }) - } - - fn merge_cached_block_execution_results( - cached_receipts: Vec, - cached_gas_used: u64, - cached_blob_gas_used: u64, - mut execution_result: BlockExecutionResult, - ) -> BlockExecutionResult { - N::Receipt::add_cumulative_gas_offset(&mut execution_result.receipts, cached_gas_used); - let mut receipts = cached_receipts; - receipts.extend(execution_result.receipts); - BlockExecutionResult { - receipts, - requests: execution_result.requests, - gas_used: cached_gas_used.saturating_add(execution_result.gas_used), - blob_gas_used: cached_blob_gas_used.saturating_add(execution_result.blob_gas_used), - } - } -} diff --git a/crates/rpc/Cargo.toml b/crates/rpc/Cargo.toml index 84ebde8a..e11c265f 100644 --- a/crates/rpc/Cargo.toml +++ b/crates/rpc/Cargo.toml @@ -34,6 +34,7 @@ alloy-rpc-types-eth.workspace = true # op op-alloy-network.workspace = true +op-alloy-rpc-types.workspace = true # rpc async-trait.workspace = true From 94e1d8ac79d9f99064744d668c8517a7bae3cac9 Mon Sep 17 00:00:00 2001 From: Niven Date: Mon, 16 Mar 2026 18:12:15 +0800 Subject: [PATCH 33/76] style(flashblocks): reformat validator match expression MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude Opus 4.6 --- crates/flashblocks/src/execution/validator.rs | 50 +++++++++---------- 1 file changed, 25 insertions(+), 25 deletions(-) diff --git a/crates/flashblocks/src/execution/validator.rs b/crates/flashblocks/src/execution/validator.rs index 42db3abf..6d8f0e78 100644 --- a/crates/flashblocks/src/execution/validator.rs +++ b/crates/flashblocks/src/execution/validator.rs @@ -140,32 +140,32 @@ where // the current pending sequence (current pending promoted to confirm, incoming // sequence is the next height). Fall back to the flashblocks overlay via // `get_pending_state_provider`. - let (state_provider, parent_header) = - match self.provider.history_by_block_hash(parent_hash) { - Ok(canon_provider) => { - let header = - self.provider.sealed_header_by_hash(parent_hash)?.ok_or_else(|| { - eyre::eyre!("parent header not found for hash {parent_hash}") - })?; - (canon_provider, header) - } - Err(err) => { - trace!( - target: "flashblocks", - error = %err, - "parent not in canonical chain, try getting state from pending state", - ); - let canonical_state = self.provider.latest()?; - self.flashblocks_state.get_pending_state_provider(canonical_state).ok_or_else( - || { - eyre::eyre!( - "parent {parent_hash} not in canonical chain and no \ + let (state_provider, parent_header) = match self.provider.history_by_block_hash(parent_hash) + { + Ok(canon_provider) => { + let header = self + .provider + .sealed_header_by_hash(parent_hash)? + .ok_or_else(|| eyre::eyre!("parent header not found for hash {parent_hash}"))?; + (canon_provider, header) + } + Err(err) => { + trace!( + target: "flashblocks", + error = %err, + "parent not in canonical chain, try getting state from pending state", + ); + let canonical_state = self.provider.latest()?; + self.flashblocks_state.get_pending_state_provider(canonical_state).ok_or_else( + || { + eyre::eyre!( + "parent {parent_hash} not in canonical chain and no \ pending state available for overlay" - ) - }, - )? - } - }; + ) + }, + )? + } + }; let outcome = self.processor.process_fresh( &base, From 56c22f5e743e5cfeb5958aae4cfe30f75767c531 Mon Sep 17 00:00:00 2001 From: Niven Date: Tue, 17 Mar 2026 17:11:38 +0800 Subject: [PATCH 34/76] refactor(rpc): update eth API to use explicit generic parameters MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Replace associated-type-based `Eth` generic with explicit `N` (node) and `Rpc` (converter) generics on `XLayerEthApiExt` and helper functions. Use fully-qualified `EthApiServer::` calls instead of inherent method dispatch for clearer trait resolution. Simplify helper function bounds by parameterizing directly on `Rpc: RpcConvert` instead of `Eth: EthApiTypes`. Update storage slot type to `JsonStorageKey` and fix transaction body access to use direct field/method calls. 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude Sonnet 4.6 --- crates/rpc/Cargo.toml | 1 + crates/rpc/src/eth.rs | 203 ++++++++++++++++++++--------------- crates/rpc/src/helper.rs | 50 ++++----- crates/rpc/src/lib.rs | 2 +- crates/rpc/src/xlayer_ext.rs | 4 +- 5 files changed, 142 insertions(+), 118 deletions(-) diff --git a/crates/rpc/Cargo.toml b/crates/rpc/Cargo.toml index e11c265f..b1cebb3f 100644 --- a/crates/rpc/Cargo.toml +++ b/crates/rpc/Cargo.toml @@ -31,6 +31,7 @@ alloy-consensus.workspace = true alloy-eips.workspace = true alloy-primitives.workspace = true alloy-rpc-types-eth.workspace = true +alloy-serde.workspace = true # op op-alloy-network.workspace = true diff --git a/crates/rpc/src/eth.rs b/crates/rpc/src/eth.rs index 265fb02a..fe3c100e 100644 --- a/crates/rpc/src/eth.rs +++ b/crates/rpc/src/eth.rs @@ -9,28 +9,28 @@ use jsonrpsee::{ use tokio_stream::wrappers::WatchStream; use tracing::*; -use alloy_consensus::BlockHeader; +use alloy_eips::eip2718::Encodable2718; use alloy_eips::{BlockId, BlockNumberOrTag}; use alloy_primitives::{Address, Bytes, TxHash, B256, U256}; use alloy_rpc_types_eth::{ state::{EvmOverrides, StateOverride}, - BlockOverrides, Filter, Index, Log, TransactionInfo, + BlockOverrides, Index, }; +use alloy_serde::JsonStorageKey; use op_alloy_network::Optimism; use op_alloy_rpc_types::OpTransactionRequest; use reth_chain_state::CanonStateSubscriptions; use reth_optimism_primitives::OpPrimitives; -use reth_optimism_rpc::eth::OpEthApi; -use reth_primitives_traits::{BlockBody, NodePrimitives, SealedHeaderFor, SignerRecoverable}; +use reth_optimism_rpc::{OpEthApi, OpEthApiError}; +use reth_primitives_traits::SealedHeaderFor; use reth_revm::{database::StateProviderDatabase, db::State}; -use reth_rpc::eth::EthFilter; -use reth_rpc_convert::RpcTransaction; +use reth_rpc_convert::{RpcConvert, RpcTransaction}; use reth_rpc_eth_api::{ - helpers::{estimate::EstimateCall, EthBlocks, EthCall, EthState, EthTransactions, FullEthApi}, - EthApiServer, EthApiTypes, RpcBlock, RpcNodeCore, RpcReceipt, + helpers::{estimate::EstimateCall, Call, FullEthApi, LoadState}, + EthApiServer, EthApiTypes, FromEvmError, RpcBlock, RpcNodeCore, RpcReceipt, }; -use reth_rpc_eth_types::{block::convert_transaction_receipt, error::FromEvmError, EthApiError}; +use reth_rpc_eth_types::{block::convert_transaction_receipt, EthApiError}; use reth_rpc_server_types::result::ToRpcResult; use reth_storage_api::{StateProvider, StateProviderBox, StateProviderFactory}; @@ -135,8 +135,8 @@ pub trait FlashblocksEthApiOverride { index: Index, ) -> RpcResult>; - /// Sends a signed transaction and awaits the transaction receipt, with the flashblock state cache - /// overlay support for pending and confirmed blocks. + /// Sends a signed transaction and awaits the transaction receipt, with the flashblock state + /// cache overlay support for pending and confirmed blocks. /// /// This will return a timeout error if the transaction isn't included within some time period. #[method(name = "sendRawTransactionSync")] @@ -164,9 +164,9 @@ pub trait FlashblocksEthApiOverride { block_number: Option, overrides: Option, ) -> RpcResult; + /// Returns the balance of the account of given address, with the flashblock state cache /// overlay support for pending and confirmed block states. - #[method(name = "getBalance")] async fn balance(&self, address: Address, block_number: Option) -> RpcResult; @@ -190,41 +190,59 @@ pub trait FlashblocksEthApiOverride { async fn storage_at( &self, address: Address, - slot: U256, + slot: JsonStorageKey, block_number: Option, ) -> RpcResult; } /// Extended Eth API with flashblocks cache overlay. #[derive(Debug)] -pub struct XLayerEthApiExt { - eth_api: OpEthApi, +pub struct XLayerEthApiExt { + eth_api: OpEthApi, + /// Stored separately to avoid associated type projection ambiguity when + /// the trait solver processes ` as EthApiTypes>::RpcConvert`. + converter: Rpc, flashblocks_state: FlashblockStateCache, } -impl XLayerEthApiExt { +impl XLayerEthApiExt { /// Creates a new [`XLayerEthApiExt`]. pub fn new( - eth_api: OpEthApi, + eth_api: OpEthApi, flashblocks_state: FlashblockStateCache, - ) -> Self { - Self { eth_api, flashblocks_state } + ) -> Self + where + Rpc: Clone + RpcConvert, + { + let converter = eth_api.converter().clone(); + Self { eth_api, converter, flashblocks_state } } } #[async_trait] -impl FlashblocksEthApiOverrideServer for XLayerEthApiExt +impl FlashblocksEthApiOverrideServer for XLayerEthApiExt where - Eth: FullEthApi + Send + Sync + 'static, - jsonrpsee_types::error::ErrorObject<'static>: From, + N: RpcNodeCore, + Rpc: RpcConvert, + OpEthApi: FullEthApi + + EthApiTypes + + RpcNodeCore + + LoadState + + Call + + EstimateCall + + Send + + Sync + + 'static, { // ----------------- Block apis ----------------- /// Handler for: `eth_blockNumber` async fn block_number(&self) -> RpcResult { trace!(target: "rpc::eth", "Serving eth_blockNumber"); let fb_height = self.flashblocks_state.get_confirm_height(); - let canon_height = self.eth_api.block_number().await?; - Ok(U256::from(std::cmp::max(fb_height, canon_height))) + // `EthApiServer::block_number` is synchronous (not async) + let canon_height: U256 = EthApiServer::block_number(&self.eth_api)?; + let fb_height = U256::from(fb_height); + Ok(std::cmp::max(fb_height, canon_height)) } /// Handler for: `eth_getBlockByNumber` @@ -235,22 +253,18 @@ where ) -> RpcResult>> { trace!(target: "rpc::eth", ?number, ?full, "Serving eth_getBlockByNumber"); if let Some(bar) = self.flashblocks_state.get_rpc_block(number) { - return to_rpc_block::(&bar, full, self.eth_api.converter()) - .map(Some) - .map_err(Into::into); + return to_rpc_block(&bar, full, &self.converter).map(Some).map_err(Into::into); } - self.eth_api.block_by_number(number, full).await + EthApiServer::block_by_number(&self.eth_api, number, full).await } /// Handler for: `eth_getBlockByHash` async fn block_by_hash(&self, hash: B256, full: bool) -> RpcResult>> { trace!(target: "rpc::eth", ?hash, ?full, "Serving eth_getBlockByHash"); if let Some(bar) = self.flashblocks_state.get_block_by_hash(&hash) { - return to_rpc_block::(&bar, full, self.eth_api.converter()) - .map(Some) - .map_err(Into::into); + return to_rpc_block(&bar, full, &self.converter).map(Some).map_err(Into::into); } - self.eth_api.block_by_hash(hash, full).await + EthApiServer::block_by_hash(&self.eth_api, hash, full).await } /// Handler for: `eth_getBlockReceipts` @@ -260,11 +274,9 @@ where ) -> RpcResult>>> { trace!(target: "rpc::eth", ?block_id, "Serving eth_getBlockReceipts"); if let Some(bar) = self.flashblocks_state.get_rpc_block(block_id) { - return to_block_receipts::(&bar, self.eth_api.converter()) - .map(Some) - .map_err(Into::into); + return to_block_receipts(&bar, &self.converter).map(Some).map_err(Into::into); } - self.eth_api.block_receipts(block_id).await + EthApiServer::block_receipts(&self.eth_api, block_id.into()).await } /// Handler for: `eth_getBlockTransactionCountByNumber` @@ -274,20 +286,20 @@ where ) -> RpcResult> { trace!(target: "rpc::eth", ?number, "Serving eth_getBlockTransactionCountByNumber"); if let Some(bar) = self.flashblocks_state.get_rpc_block(number) { - let count = bar.block.body().transaction_count(); + let count = bar.block.body().transactions.len(); return Ok(Some(U256::from(count))); } - self.eth_api.block_transaction_count_by_number(number).await + EthApiServer::block_transaction_count_by_number(&self.eth_api, number).await } - /// Handler for: `eth_getUncleCountByBlockHash` + /// Handler for: `eth_getBlockTransactionCountByHash` async fn block_transaction_count_by_hash(&self, hash: B256) -> RpcResult> { trace!(target: "rpc::eth", ?hash, "Serving eth_getBlockTransactionCountByHash"); if let Some(bar) = self.flashblocks_state.get_block_by_hash(&hash) { - let count = bar.block.body().transaction_count(); + let count = bar.block.body().transactions.len(); return Ok(Some(U256::from(count))); } - self.eth_api.block_transaction_count_by_hash(hash).await + EthApiServer::block_transaction_count_by_hash(&self.eth_api, hash).await } // ----------------- Transaction apis ----------------- @@ -298,9 +310,9 @@ where ) -> RpcResult>> { trace!(target: "rpc::eth", ?hash, "Serving eth_getTransactionByHash"); if let Some((info, bar)) = self.flashblocks_state.get_tx_info(&hash) { - return Ok(Some(to_rpc_transaction::(&info, &bar, self.eth_api.converter())?)); + return Ok(Some(to_rpc_transaction(&info, &bar, &self.converter)?)); } - self.eth_api.transaction_by_hash(hash).await + EthApiServer::transaction_by_hash(&self.eth_api, hash).await } /// Handler for: `eth_getRawTransactionByHash` @@ -309,7 +321,7 @@ where if let Some((info, _)) = self.flashblocks_state.get_tx_info(&hash) { return Ok(Some(info.tx.encoded_2718().into())); } - self.eth_api.raw_transaction_by_hash(hash).await + EthApiServer::raw_transaction_by_hash(&self.eth_api, hash).await } /// Handler for: `eth_getTransactionReceipt` @@ -317,11 +329,11 @@ where trace!(target: "rpc::eth", ?hash, "Serving eth_getTransactionReceipt"); if let Some((_, bar)) = self.flashblocks_state.get_tx_info(&hash) && let Some(Ok(receipt)) = - bar.find_and_convert_transaction_receipt(hash, self.eth_api.converter()) + bar.find_and_convert_transaction_receipt(hash, &self.converter) { return Ok(Some(receipt)); } - self.eth_api.transaction_receipt(hash).await + EthApiServer::transaction_receipt(&self.eth_api, hash).await } /// Handler for: `eth_getTransactionByBlockHashAndIndex` @@ -332,14 +344,10 @@ where ) -> RpcResult>> { trace!(target: "rpc::eth", ?block_hash, ?index, "Serving eth_getTransactionByBlockHashAndIndex"); if let Some(bar) = self.flashblocks_state.get_block_by_hash(&block_hash) { - return to_rpc_transaction_from_bar_and_index::( - &bar, - index.into(), - self.eth_api.converter(), - ) - .map_err(Into::into); + return to_rpc_transaction_from_bar_and_index(&bar, index.into(), &self.converter) + .map_err(Into::into); } - self.eth_api.transaction_by_block_hash_and_index(block_hash, index).await + EthApiServer::transaction_by_block_hash_and_index(&self.eth_api, block_hash, index).await } /// Handler for: `eth_getTransactionByBlockNumberAndIndex` @@ -350,14 +358,10 @@ where ) -> RpcResult>> { trace!(target: "rpc::eth", ?number, ?index, "Serving eth_getTransactionByBlockNumberAndIndex"); if let Some(bar) = self.flashblocks_state.get_rpc_block(number) { - return to_rpc_transaction_from_bar_and_index::( - &bar, - index.into(), - self.eth_api.converter(), - ) - .map_err(Into::into); + return to_rpc_transaction_from_bar_and_index(&bar, index.into(), &self.converter) + .map_err(Into::into); } - self.eth_api.transaction_by_block_number_and_index(number, index).await + EthApiServer::transaction_by_block_number_and_index(&self.eth_api, number, index).await } /// Handler for: `eth_getRawTransactionByBlockHashAndIndex` @@ -367,12 +371,13 @@ where index: Index, ) -> RpcResult> { trace!(target: "rpc::eth", ?hash, ?index, "Serving eth_getRawTransactionByBlockHashAndIndex"); + let idx: usize = index.into(); if let Some(bar) = self.flashblocks_state.get_block_by_hash(&hash) - && let Some(tx) = bar.block.body().transactions().nth(index.into()) + && let Some(tx) = bar.block.body().transactions.get(idx) { return Ok(Some(tx.encoded_2718().into())); } - self.eth_api.raw_transaction_by_block_hash_and_index(hash, index).await + EthApiServer::raw_transaction_by_block_hash_and_index(&self.eth_api, hash, index).await } /// Handler for: `eth_getRawTransactionByBlockNumberAndIndex` @@ -382,21 +387,25 @@ where index: Index, ) -> RpcResult> { trace!(target: "rpc::eth", ?number, ?index, "Serving eth_getRawTransactionByBlockNumberAndIndex"); + let idx: usize = index.into(); if let Some(bar) = self.flashblocks_state.get_rpc_block(number) - && let Some(tx) = bar.block.body().transactions().nth(index.into()) + && let Some(tx) = bar.block.body().transactions.get(idx) { return Ok(Some(tx.encoded_2718().into())); } - self.eth_api.raw_transaction_by_block_number_and_index(number, index).await + EthApiServer::raw_transaction_by_block_number_and_index(&self.eth_api, number, index).await } /// Handler for: `eth_sendRawTransactionSync` async fn send_raw_transaction_sync(&self, tx: Bytes) -> RpcResult> { + use reth_rpc_eth_api::helpers::EthTransactions; + trace!(target: "rpc::eth", ?tx, "Serving eth_sendRawTransactionSync"); - let timeout_duration = EthTransactions::send_raw_transaction_sync_timeout(&self.eth_api); - let hash = - EthTransactions::send_raw_transaction(&self.eth_api, tx).await.map_err(Into::into)?; - let converter = self.eth_api.converter(); + let timeout_duration = self.eth_api.send_raw_transaction_sync_timeout(); + let hash = as EthTransactions>::send_raw_transaction(&self.eth_api, tx) + .await + .map_err(|e| -> jsonrpsee_types::error::ErrorObject<'static> { e.into() })?; + let converter = &self.converter; let mut canonical_stream = self.eth_api.provider().canonical_state_stream(); let mut flashblock_stream = @@ -413,7 +422,7 @@ where if let Some(receipt) = bar.find_and_convert_transaction_receipt(hash, converter) { - return receipt; + return receipt.map_err(Into::into); } } } @@ -421,18 +430,18 @@ where canonical_notification = canonical_stream.next() => { if let Some(notification) = canonical_notification { let chain = notification.committed(); - if let Some((block, tx, receipt, all_receipts)) = + if let Some((block, indexed_tx, receipt, all_receipts)) = chain.find_transaction_and_receipt_by_hash(hash) { if let Some(receipt) = convert_transaction_receipt( block, all_receipts, - tx, + indexed_tx, receipt, converter, ) .transpose() - .map_err(Into::into)? + .map_err(|e: OpEthApiError| -> jsonrpsee_types::error::ErrorObject<'static> { e.into() })? { return Ok(receipt); } @@ -465,7 +474,10 @@ where ) -> RpcResult { trace!(target: "rpc::eth", ?transaction, ?block_number, ?state_overrides, ?block_overrides, "Serving eth_call"); if let Some((state, header)) = self.get_flashblock_state_provider_by_id(block_number)? { - let evm_env = self.eth_api.evm_env_for_header(&header).map_err(Into::into)?; + let evm_env = self + .eth_api + .evm_env_for_header(&header) + .map_err(|e| -> jsonrpsee_types::error::ErrorObject<'static> { e.into() })?; let mut db = State::builder().with_database(StateProviderDatabase::new(state)).build(); let (evm_env, tx_env) = self .eth_api @@ -475,11 +487,22 @@ where &mut db, EvmOverrides::new(state_overrides, block_overrides), ) - .map_err(Into::into)?; - let res = EthCall::transact(&self.eth_api, db, evm_env, tx_env).map_err(Into::into)?; - return ::Error::ensure_success(res.result).map_err(Into::into); + .map_err(|e| -> jsonrpsee_types::error::ErrorObject<'static> { e.into() })?; + let res = self + .eth_api + .transact(db, evm_env, tx_env) + .map_err(|e| -> jsonrpsee_types::error::ErrorObject<'static> { e.into() })?; + return >::ensure_success(res.result) + .map_err(Into::into); } - self.eth_api.call(transaction, block_number, state_overrides, block_overrides).await + EthApiServer::call( + &self.eth_api, + transaction, + block_number, + state_overrides, + block_overrides, + ) + .await } /// Handler for: `eth_estimateGas` @@ -491,13 +514,16 @@ where ) -> RpcResult { trace!(target: "rpc::eth", ?transaction, ?block_number, "Serving eth_estimateGas"); if let Some((state, header)) = self.get_flashblock_state_provider_by_id(block_number)? { - let evm_env = self.eth_api.evm_env_for_header(&header).map_err(Into::into)?; + let evm_env = self + .eth_api + .evm_env_for_header(&header) + .map_err(|e| -> jsonrpsee_types::error::ErrorObject<'static> { e.into() })?; return self .eth_api .estimate_gas_with(evm_env, transaction, state, overrides) .map_err(Into::into); } - self.eth_api.estimate_gas(transaction, block_number, overrides).await + EthApiServer::estimate_gas(&self.eth_api, transaction, block_number, overrides).await } /// Handler for: `eth_getBalance` @@ -506,7 +532,7 @@ where if let Some((state, _)) = self.get_flashblock_state_provider_by_id(block_number)? { return Ok(state.account_balance(&address).to_rpc_result()?.unwrap_or_default()); } - self.eth_api.balance(address, block_number).await + EthApiServer::balance(&self.eth_api, address, block_number).await } /// Handler for: `eth_getTransactionCount` @@ -521,7 +547,7 @@ where state.account_nonce(&address).to_rpc_result()?.unwrap_or_default(), )); } - self.eth_api.transaction_count(address, block_number).await + EthApiServer::transaction_count(&self.eth_api, address, block_number).await } /// Handler for: `eth_getCode` @@ -534,34 +560,35 @@ where .map(|code| code.original_bytes()) .unwrap_or_default()); } - self.eth_api.get_code(address, block_number).await + EthApiServer::get_code(&self.eth_api, address, block_number).await } /// Handler for: `eth_getStorageAt` async fn storage_at( &self, address: Address, - slot: U256, + slot: JsonStorageKey, block_number: Option, ) -> RpcResult { trace!(target: "rpc::eth", ?address, ?slot, ?block_number, "Serving eth_getStorageAt"); if let Some((state, _)) = self.get_flashblock_state_provider_by_id(block_number)? { - let storage_key = B256::new(slot.to_be_bytes()); return Ok(B256::new( state - .storage(address, storage_key) + .storage(address, slot.as_b256()) .to_rpc_result()? .unwrap_or_default() .to_be_bytes(), )); } - self.eth_api.storage_at(address, slot, block_number).await + EthApiServer::storage_at(&self.eth_api, address, slot, block_number).await } } -impl XLayerEthApiExt +impl XLayerEthApiExt where - Eth: FullEthApi + Send + Sync + 'static, + N: RpcNodeCore, + Rpc: RpcConvert, + OpEthApi: RpcNodeCore + Send + Sync + 'static, { /// Returns a `StateProvider` overlaying flashblock execution state on top of canonical state /// for the given block ID. Returns `None` if the block is not in the flashblocks cache. diff --git a/crates/rpc/src/helper.rs b/crates/rpc/src/helper.rs index e9d75269..47316349 100644 --- a/crates/rpc/src/helper.rs +++ b/crates/rpc/src/helper.rs @@ -1,15 +1,16 @@ -use alloy_consensus::TxReceipt; +use alloy_consensus::{BlockHeader, TxReceipt}; +use alloy_eips::eip2718::Encodable2718; use alloy_primitives::B256; use alloy_rpc_types_eth::TransactionInfo; use op_alloy_network::Optimism; use reth_optimism_primitives::OpPrimitives; -use reth_primitives_traits::{Recovered, SignerRecoverable, TransactionMeta}; +use reth_primitives_traits::{Recovered, SignedTransaction, TransactionMeta}; use reth_rpc_convert::{transaction::ConvertReceiptInput, RpcConvert, RpcTransaction}; -use reth_rpc_eth_api::{EthApiTypes, RpcBlock, RpcReceipt}; -use reth_rpc_eth_types::{block::BlockAndReceipts, utils::calculate_gas_used_and_next_log_index}; +use reth_rpc_eth_api::{RpcBlock, RpcReceipt}; +use reth_rpc_eth_types::block::BlockAndReceipts; -use xlayer_flashblocks::cache::CachedTxInfo; +use xlayer_flashblocks::CachedTxInfo; /// Converter for `TransactionMeta` pub(crate) fn build_tx_meta( @@ -44,14 +45,13 @@ pub(crate) fn build_tx_info( } /// Converts a `BlockAndReceipts` into an RPC block. -pub(crate) fn to_rpc_block>( +pub(crate) fn to_rpc_block( bar: &BlockAndReceipts, full: bool, - converter: &Eth::RpcConvert, -) -> Result, Eth::Error> + converter: &Rpc, +) -> Result, Rpc::Error> where - Eth::RpcConvert: RpcConvert, - Eth::Error: From<::Error>, + Rpc: RpcConvert, { Ok(bar.block.clone_into_rpc_block( full.into(), @@ -61,13 +61,12 @@ where } /// Converts all receipts from a `BlockAndReceipts` into RPC receipts. -pub(crate) fn to_block_receipts>( +pub(crate) fn to_block_receipts( bar: &BlockAndReceipts, - converter: &Eth::RpcConvert, -) -> Result>, Eth::Error> + converter: &Rpc, +) -> Result>, Rpc::Error> where - Eth::RpcConvert: RpcConvert, - Eth::Error: From<::Error>, + Rpc: RpcConvert, { let txs = bar.block.body().transactions(); let senders = bar.block.senders(); @@ -77,7 +76,6 @@ where let mut next_log_index = 0usize; let inputs = txs - .iter() .zip(senders.iter()) .zip(receipts.iter()) .enumerate() @@ -105,28 +103,26 @@ where } /// Converts a `CachedTxInfo` and `BlockAndReceipts` into an RPC transaction. -pub(crate) fn to_rpc_transaction>( +pub(crate) fn to_rpc_transaction( info: &CachedTxInfo, bar: &BlockAndReceipts, - converter: &Eth::RpcConvert, -) -> Result, Eth::Error> + converter: &Rpc, +) -> Result, Rpc::Error> where - Eth::RpcConvert: RpcConvert, - Eth::Error: From<::Error>, + Rpc: RpcConvert, { let tx_info = build_tx_info(bar, info.tx.tx_hash(), info.tx_index); - Ok(converter.fill(info.tx.try_into_recovered_unchecked()?, tx_info)?) + Ok(converter.fill(info.tx.clone().try_into_recovered().expect("valid cached tx"), tx_info)?) } /// Converts a `BlockAndReceipts` and transaction index into an RPC transaction. -pub(crate) fn to_rpc_transaction_from_bar_and_index>( +pub(crate) fn to_rpc_transaction_from_bar_and_index( bar: &BlockAndReceipts, index: usize, - converter: &Eth::RpcConvert, -) -> Result>, Eth::Error> + converter: &Rpc, +) -> Result>, Rpc::Error> where - Eth::RpcConvert: RpcConvert, - Eth::Error: From<::Error>, + Rpc: RpcConvert, { if let Some((signer, tx)) = bar.block.transactions_with_sender().nth(index) { let tx_info = build_tx_info(bar, tx.tx_hash(), index as u64); diff --git a/crates/rpc/src/lib.rs b/crates/rpc/src/lib.rs index dcdb19fe..5af0eeac 100644 --- a/crates/rpc/src/lib.rs +++ b/crates/rpc/src/lib.rs @@ -5,7 +5,7 @@ pub mod eth; pub mod helper; pub mod xlayer_ext; -pub use eth::{EthApiOverrideServer, XLayerEthApiExt}; +pub use eth::{FlashblocksEthApiOverrideServer, XLayerEthApiExt}; pub use xlayer_ext::{SequencerClientProvider, XlayerRpcExt, XlayerRpcExtApiServer}; // Implement `SequencerClientProvider` for `OpEthApi` diff --git a/crates/rpc/src/xlayer_ext.rs b/crates/rpc/src/xlayer_ext.rs index bd9c50b6..d7e1788e 100644 --- a/crates/rpc/src/xlayer_ext.rs +++ b/crates/rpc/src/xlayer_ext.rs @@ -55,13 +55,13 @@ mod tests { #[test] fn test_flashblocks_disabled_when_no_cache() { let ext = XlayerRpcExt::new(None); - assert!(ext.flash_cache.is_none()); + assert!(ext.flashblocks_state.is_none()); } #[test] fn test_flashblocks_disabled_at_zero_height() { let cache = FlashblockStateCache::::new(); let ext = XlayerRpcExt::new(Some(cache)); - assert!(ext.flash_cache.as_ref().unwrap().get_confirm_height() == 0); + assert!(ext.flashblocks_state.as_ref().unwrap().get_confirm_height() == 0); } } From 2d6d29c77881d8fc57d5bf2201a8647d0cb2f353 Mon Sep 17 00:00:00 2001 From: Niven Date: Tue, 17 Mar 2026 17:29:51 +0800 Subject: [PATCH 35/76] refactor(rpc): rename modules and types for clarity MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Rename `eth.rs` to `flashblocks.rs` and `xlayer_ext.rs` to `default.rs` to better reflect their responsibilities. Rename `XLayerEthApiExt` to `FlashblocksEthApiExt`, `XlayerRpcExt` to `DefaultRpcExt`, and `XlayerRpcExtApi` to `DefaultRpcExtApi`. Extract `FlashblocksRpcArgs` as a standalone struct with derive macros and update `main.rs` to use the new type names and module paths. 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude Sonnet 4.6 --- bin/node/src/args.rs | 11 +++++++++-- bin/node/src/main.rs | 19 ++++++++++--------- crates/rpc/src/{xlayer_ext.rs => default.rs} | 18 +++++++++--------- crates/rpc/src/{eth.rs => flashblocks.rs} | 16 +++++++++------- crates/rpc/src/helper.rs | 1 - crates/rpc/src/lib.rs | 8 ++++---- 6 files changed, 41 insertions(+), 32 deletions(-) rename crates/rpc/src/{xlayer_ext.rs => default.rs} (82%) rename crates/rpc/src/{eth.rs => flashblocks.rs} (98%) diff --git a/bin/node/src/args.rs b/bin/node/src/args.rs index a6c2c64a..fc9d13e0 100644 --- a/bin/node/src/args.rs +++ b/bin/node/src/args.rs @@ -123,6 +123,7 @@ impl LegacyRpcArgs { } } +#[derive(Debug, Clone, Args, PartialEq, Eq, Default)] pub struct FlashblocksRpcArgs { /// Enable flashblocks RPC #[arg( @@ -292,13 +293,19 @@ mod tests { "--xlayer.flashblocks-subscription", "--xlayer.flashblocks-subscription-max-addresses", "2000", + "--xlayer.flashblocks-url", + "ws://localhost:1111", ]) .args; - assert!(args.enable_flashblocks_subscription); assert!(args.legacy.legacy_rpc_url.is_some()); assert_eq!(args.legacy.legacy_rpc_timeout, Duration::from_secs(45)); - assert_eq!(args.flashblocks_subscription_max_addresses, 2000); + assert!(args.flashblocks_rpc.enable_flashblocks_subscription); + assert_eq!(args.flashblocks_rpc.flashblocks_subscription_max_addresses, 2000); + assert_eq!( + args.flashblocks_rpc.flashblock_url, + Some(Url::parse("ws://localhost:1111").unwrap()) + ); assert!(args.validate().is_ok()); } diff --git a/bin/node/src/main.rs b/bin/node/src/main.rs index bca84f0b..ce5b0d17 100644 --- a/bin/node/src/main.rs +++ b/bin/node/src/main.rs @@ -11,7 +11,6 @@ use either::Either; use std::sync::Arc; use tracing::info; -use reth::providers::BlockNumReader; use reth::rpc::eth::EthApiTypes; use reth::{ builder::{DebugNodeLauncher, EngineNodeLauncher, Node, NodeHandle, TreeConfig}, @@ -23,11 +22,13 @@ use reth_rpc_server_types::RethRpcModule; use xlayer_chainspec::XLayerChainSpecParser; use xlayer_flashblocks::{ - cache::FlashblockStateCache, FlashblocksPubSub, FlashblocksRpcService, WsFlashBlockStream, + FlashblockStateCache, FlashblocksPubSub, FlashblocksRpcService, WsFlashBlockStream, }; use xlayer_legacy_rpc::{layer::LegacyRpcRouterLayer, LegacyRpcRouterConfig}; use xlayer_monitor::{start_monitor_handle, RpcMonitorLayer, XLayerMonitor}; -use xlayer_rpc::{EthApiOverrideServer, XLayerEthApiExt, XlayerRpcExt, XlayerRpcExtApiServer}; +use xlayer_rpc::{ + DefaultRpcExt, DefaultRpcExtApiServer, FlashblocksEthApiExt, FlashblocksEthApiOverrideServer, +}; #[global_allocator] static ALLOC: reth_cli_util::allocator::Allocator = reth_cli_util::allocator::new_allocator(); @@ -126,7 +127,7 @@ fn main() { let flashblocks_state = FlashblockStateCache::new(); let stream = WsFlashBlockStream::new(flashblock_url); let service = FlashblocksRpcService::new( - ctx.node().task_executor().clone(), + ctx.node().task_executor.clone(), stream, args.xlayer_args.builder.flashblocks, args.rollup_args.flashblocks_url.is_some(), @@ -142,7 +143,7 @@ fn main() { flashblocks_state.subscribe_pending_sequence(), Box::new(ctx.node().task_executor.clone()), new_op_eth_api.converter().clone(), - xlayer_args.flashblocks_subscription_max_addresses, + args.xlayer_args.flashblocks_rpc.flashblocks_subscription_max_addresses, ); ctx.modules.add_or_replace_if_module_configured( RethRpcModule::Eth, @@ -152,13 +153,13 @@ fn main() { } // Register flashblocks Eth API overrides - let flashblocks_eth = XLayerEthApiExt::new( + let flashblocks_eth = FlashblocksEthApiExt::new( ctx.registry.eth_api().clone(), flashblocks_state.clone(), ); ctx.modules.add_or_replace_if_module_configured( RethRpcModule::Eth, - EthApiOverrideServer::into_rpc(flashblocks_eth), + FlashblocksEthApiOverrideServer::into_rpc(flashblocks_eth), )?; info!(target: "reth::cli", "xlayer flashblocks eth api overrides initialized"); Some(flashblocks_state) @@ -167,8 +168,8 @@ fn main() { }; // Register X Layer RPC - let xlayer_rpc = XlayerRpcExt::new(flashblocks_state); - ctx.modules.merge_configured(XlayerRpcExtApiServer::into_rpc( + let xlayer_rpc = DefaultRpcExt::new(flashblocks_state); + ctx.modules.merge_configured(DefaultRpcExtApiServer::into_rpc( xlayer_rpc, ))?; info!(target: "reth::cli", "xlayer eth rpc extension enabled"); diff --git a/crates/rpc/src/xlayer_ext.rs b/crates/rpc/src/default.rs similarity index 82% rename from crates/rpc/src/xlayer_ext.rs rename to crates/rpc/src/default.rs index d7e1788e..dfb755b2 100644 --- a/crates/rpc/src/xlayer_ext.rs +++ b/crates/rpc/src/default.rs @@ -14,9 +14,9 @@ pub trait SequencerClientProvider { fn sequencer_client(&self) -> Option<&SequencerClient>; } -/// `XLayer`-specific RPC API trait. +/// X Layer default Eth JSON-RPC API extension trait. #[rpc(server, namespace = "eth")] -pub trait XlayerRpcExtApi { +pub trait DefaultRpcExtApi { /// Returns boolean indicating if the node's flashblocks RPC functionality is enabled, /// and if the flashblocks state cache is initialized. /// @@ -27,21 +27,21 @@ pub trait XlayerRpcExtApi { async fn flashblocks_enabled(&self) -> RpcResult; } -/// `XLayer` RPC extension implementation. +/// X Layer default Eth JSON-RPC API extension implementation. #[derive(Debug, Clone)] -pub struct XlayerRpcExt { +pub struct DefaultRpcExt { flashblocks_state: Option>, } -impl XlayerRpcExt { - /// Creates a new [`XlayerRpcExt`]. +impl DefaultRpcExt { + /// Creates a new [`DefaultRpcExt`]. pub fn new(flashblocks_state: Option>) -> Self { Self { flashblocks_state } } } #[async_trait] -impl XlayerRpcExtApiServer for XlayerRpcExt { +impl DefaultRpcExtApiServer for DefaultRpcExt { /// Handler for: `eth_flashblocksEnabled` async fn flashblocks_enabled(&self) -> RpcResult { Ok(self.flashblocks_state.as_ref().is_some_and(|cache| cache.get_confirm_height() > 0)) @@ -54,14 +54,14 @@ mod tests { #[test] fn test_flashblocks_disabled_when_no_cache() { - let ext = XlayerRpcExt::new(None); + let ext = DefaultRpcExt::new(None); assert!(ext.flashblocks_state.is_none()); } #[test] fn test_flashblocks_disabled_at_zero_height() { let cache = FlashblockStateCache::::new(); - let ext = XlayerRpcExt::new(Some(cache)); + let ext = DefaultRpcExt::new(Some(cache)); assert!(ext.flashblocks_state.as_ref().unwrap().get_confirm_height() == 0); } } diff --git a/crates/rpc/src/eth.rs b/crates/rpc/src/flashblocks.rs similarity index 98% rename from crates/rpc/src/eth.rs rename to crates/rpc/src/flashblocks.rs index fe3c100e..8f8b7135 100644 --- a/crates/rpc/src/eth.rs +++ b/crates/rpc/src/flashblocks.rs @@ -197,7 +197,7 @@ pub trait FlashblocksEthApiOverride { /// Extended Eth API with flashblocks cache overlay. #[derive(Debug)] -pub struct XLayerEthApiExt { +pub struct FlashblocksEthApiExt { eth_api: OpEthApi, /// Stored separately to avoid associated type projection ambiguity when /// the trait solver processes ` as EthApiTypes>::RpcConvert`. @@ -205,8 +205,8 @@ pub struct XLayerEthApiExt { flashblocks_state: FlashblockStateCache, } -impl XLayerEthApiExt { - /// Creates a new [`XLayerEthApiExt`]. +impl FlashblocksEthApiExt { + /// Creates a new [`FlashblocksEthApiExt`]. pub fn new( eth_api: OpEthApi, flashblocks_state: FlashblockStateCache, @@ -220,10 +220,11 @@ impl XLayerEthApiExt { } #[async_trait] -impl FlashblocksEthApiOverrideServer for XLayerEthApiExt +impl FlashblocksEthApiOverrideServer for FlashblocksEthApiExt where N: RpcNodeCore, - Rpc: RpcConvert, + Rpc: RpcConvert + + RpcConvert, OpEthApi: FullEthApi + EthApiTypes + RpcNodeCore @@ -584,10 +585,11 @@ where } } -impl XLayerEthApiExt +impl FlashblocksEthApiExt where N: RpcNodeCore, - Rpc: RpcConvert, + Rpc: RpcConvert + + RpcConvert, OpEthApi: RpcNodeCore + Send + Sync + 'static, { /// Returns a `StateProvider` overlaying flashblock execution state on top of canonical state diff --git a/crates/rpc/src/helper.rs b/crates/rpc/src/helper.rs index 47316349..a500ddcf 100644 --- a/crates/rpc/src/helper.rs +++ b/crates/rpc/src/helper.rs @@ -1,5 +1,4 @@ use alloy_consensus::{BlockHeader, TxReceipt}; -use alloy_eips::eip2718::Encodable2718; use alloy_primitives::B256; use alloy_rpc_types_eth::TransactionInfo; use op_alloy_network::Optimism; diff --git a/crates/rpc/src/lib.rs b/crates/rpc/src/lib.rs index 5af0eeac..aae2ef8c 100644 --- a/crates/rpc/src/lib.rs +++ b/crates/rpc/src/lib.rs @@ -1,12 +1,12 @@ #![cfg_attr(not(test), warn(unused_crate_dependencies))] #![cfg_attr(docsrs, feature(doc_cfg))] -pub mod eth; +pub mod default; +pub mod flashblocks; pub mod helper; -pub mod xlayer_ext; -pub use eth::{FlashblocksEthApiOverrideServer, XLayerEthApiExt}; -pub use xlayer_ext::{SequencerClientProvider, XlayerRpcExt, XlayerRpcExtApiServer}; +pub use default::{DefaultRpcExt, DefaultRpcExtApiServer, SequencerClientProvider}; +pub use flashblocks::{FlashblocksEthApiExt, FlashblocksEthApiOverrideServer}; // Implement `SequencerClientProvider` for `OpEthApi` use reth_optimism_rpc::{OpEthApi, SequencerClient}; From 832966472545193c7fa71974fc022d5b6fbffc86 Mon Sep 17 00:00:00 2001 From: Niven Date: Tue, 17 Mar 2026 18:56:31 +0800 Subject: [PATCH 36/76] chore(rpc): remove unused dependencies and clean up MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Remove unused `reth-rpc` and `async-trait` dependencies from `crates/rpc/Cargo.toml`. Remove stale comment on converter field, fix redundant parentheses in payload builder, and use associated `N::Primitives` type instead of concrete `OpPrimitives` in `get_flashblock_state_provider_by_id` return type. 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude Sonnet 4.6 --- bin/node/src/payload.rs | 2 +- crates/rpc/Cargo.toml | 2 -- crates/rpc/src/flashblocks.rs | 4 +--- crates/rpc/src/lib.rs | 2 +- 4 files changed, 3 insertions(+), 7 deletions(-) diff --git a/bin/node/src/payload.rs b/bin/node/src/payload.rs index e308c968..1160030f 100644 --- a/bin/node/src/payload.rs +++ b/bin/node/src/payload.rs @@ -46,7 +46,7 @@ impl XLayerPayloadServiceBuilder { da_config: OpDAConfig, gas_limit_config: OpGasLimitConfig, ) -> eyre::Result { - let builder = if (xlayer_builder_args.flashblocks.enabled || flashblock_rpc) { + let builder = if xlayer_builder_args.flashblocks.enabled || flashblock_rpc { let builder_config = BuilderConfig::try_from(xlayer_builder_args)?; XLayerPayloadServiceBuilderInner::Flashblocks(Box::new(FlashblocksServiceBuilder( builder_config, diff --git a/crates/rpc/Cargo.toml b/crates/rpc/Cargo.toml index b1cebb3f..febf835f 100644 --- a/crates/rpc/Cargo.toml +++ b/crates/rpc/Cargo.toml @@ -18,7 +18,6 @@ reth-chain-state.workspace = true reth-optimism-rpc.workspace = true reth-optimism-primitives.workspace = true reth-primitives-traits.workspace = true -reth-rpc.workspace = true reth-rpc-convert.workspace = true reth-rpc-eth-api.workspace = true reth-rpc-eth-types.workspace = true @@ -38,7 +37,6 @@ op-alloy-network.workspace = true op-alloy-rpc-types.workspace = true # rpc -async-trait.workspace = true jsonrpsee.workspace = true jsonrpsee-types.workspace = true diff --git a/crates/rpc/src/flashblocks.rs b/crates/rpc/src/flashblocks.rs index 8f8b7135..1ea15fd2 100644 --- a/crates/rpc/src/flashblocks.rs +++ b/crates/rpc/src/flashblocks.rs @@ -199,8 +199,6 @@ pub trait FlashblocksEthApiOverride { #[derive(Debug)] pub struct FlashblocksEthApiExt { eth_api: OpEthApi, - /// Stored separately to avoid associated type projection ambiguity when - /// the trait solver processes ` as EthApiTypes>::RpcConvert`. converter: Rpc, flashblocks_state: FlashblockStateCache, } @@ -597,7 +595,7 @@ where fn get_flashblock_state_provider_by_id( &self, block_id: Option, - ) -> RpcResult)>> { + ) -> RpcResult)>> { let canon_state = self.eth_api.provider().latest().to_rpc_result()?; Ok(self.flashblocks_state.get_state_provider_by_id(block_id, canon_state)) } diff --git a/crates/rpc/src/lib.rs b/crates/rpc/src/lib.rs index aae2ef8c..ea567160 100644 --- a/crates/rpc/src/lib.rs +++ b/crates/rpc/src/lib.rs @@ -8,10 +8,10 @@ pub mod helper; pub use default::{DefaultRpcExt, DefaultRpcExtApiServer, SequencerClientProvider}; pub use flashblocks::{FlashblocksEthApiExt, FlashblocksEthApiOverrideServer}; -// Implement `SequencerClientProvider` for `OpEthApi` use reth_optimism_rpc::{OpEthApi, SequencerClient}; use reth_rpc_eth_api::{RpcConvert, RpcNodeCore}; +// Implement `SequencerClientProvider` for `OpEthApi` impl SequencerClientProvider for OpEthApi where N: RpcNodeCore, From bf9f91a72a21e7c5a66355d02d138ca6d9e29e58 Mon Sep 17 00:00:00 2001 From: Niven Date: Tue, 17 Mar 2026 19:07:16 +0800 Subject: [PATCH 37/76] Fix --- crates/rpc/src/flashblocks.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/rpc/src/flashblocks.rs b/crates/rpc/src/flashblocks.rs index 1ea15fd2..8bc1c90a 100644 --- a/crates/rpc/src/flashblocks.rs +++ b/crates/rpc/src/flashblocks.rs @@ -595,7 +595,7 @@ where fn get_flashblock_state_provider_by_id( &self, block_id: Option, - ) -> RpcResult)>> { + ) -> RpcResult)>> { let canon_state = self.eth_api.provider().latest().to_rpc_result()?; Ok(self.flashblocks_state.get_state_provider_by_id(block_id, canon_state)) } From 2f9b01e0d5630001dc0622f2f86b02cccd232c7f Mon Sep 17 00:00:00 2001 From: Niven Date: Wed, 18 Mar 2026 07:57:40 +0800 Subject: [PATCH 38/76] Revert "style(flashblocks): reformat validator match expression" This reverts commit 94e1d8ac79d9f99064744d668c8517a7bae3cac9. --- crates/flashblocks/src/execution/validator.rs | 50 +++++++++---------- 1 file changed, 25 insertions(+), 25 deletions(-) diff --git a/crates/flashblocks/src/execution/validator.rs b/crates/flashblocks/src/execution/validator.rs index 6d8f0e78..42db3abf 100644 --- a/crates/flashblocks/src/execution/validator.rs +++ b/crates/flashblocks/src/execution/validator.rs @@ -140,32 +140,32 @@ where // the current pending sequence (current pending promoted to confirm, incoming // sequence is the next height). Fall back to the flashblocks overlay via // `get_pending_state_provider`. - let (state_provider, parent_header) = match self.provider.history_by_block_hash(parent_hash) - { - Ok(canon_provider) => { - let header = self - .provider - .sealed_header_by_hash(parent_hash)? - .ok_or_else(|| eyre::eyre!("parent header not found for hash {parent_hash}"))?; - (canon_provider, header) - } - Err(err) => { - trace!( - target: "flashblocks", - error = %err, - "parent not in canonical chain, try getting state from pending state", - ); - let canonical_state = self.provider.latest()?; - self.flashblocks_state.get_pending_state_provider(canonical_state).ok_or_else( - || { - eyre::eyre!( - "parent {parent_hash} not in canonical chain and no \ + let (state_provider, parent_header) = + match self.provider.history_by_block_hash(parent_hash) { + Ok(canon_provider) => { + let header = + self.provider.sealed_header_by_hash(parent_hash)?.ok_or_else(|| { + eyre::eyre!("parent header not found for hash {parent_hash}") + })?; + (canon_provider, header) + } + Err(err) => { + trace!( + target: "flashblocks", + error = %err, + "parent not in canonical chain, try getting state from pending state", + ); + let canonical_state = self.provider.latest()?; + self.flashblocks_state.get_pending_state_provider(canonical_state).ok_or_else( + || { + eyre::eyre!( + "parent {parent_hash} not in canonical chain and no \ pending state available for overlay" - ) - }, - )? - } - }; + ) + }, + )? + } + }; let outcome = self.processor.process_fresh( &base, From 634594d03c23eb5a99fe7d0e8e36af5976912ca1 Mon Sep 17 00:00:00 2001 From: Niven Date: Wed, 18 Mar 2026 07:57:51 +0800 Subject: [PATCH 39/76] Revert "feat: revamp worker, split into processor and validator" This reverts commit f2d37526175523cdaba87d8fe47b626add59386a. --- Cargo.lock | 1 - crates/flashblocks/src/execution/mod.rs | 15 +- crates/flashblocks/src/execution/processor.rs | 216 ---------- crates/flashblocks/src/execution/validator.rs | 292 ------------- crates/flashblocks/src/execution/worker.rs | 396 ++++++++++++++++++ crates/rpc/Cargo.toml | 1 - 6 files changed, 397 insertions(+), 524 deletions(-) delete mode 100644 crates/flashblocks/src/execution/processor.rs delete mode 100644 crates/flashblocks/src/execution/validator.rs create mode 100644 crates/flashblocks/src/execution/worker.rs diff --git a/Cargo.lock b/Cargo.lock index 60bf1f88..7be4e612 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -14352,7 +14352,6 @@ dependencies = [ "jsonrpsee", "jsonrpsee-types", "op-alloy-network", - "op-alloy-rpc-types", "reth-chain-state", "reth-optimism-primitives", "reth-optimism-rpc", diff --git a/crates/flashblocks/src/execution/mod.rs b/crates/flashblocks/src/execution/mod.rs index 228b9576..89adac12 100644 --- a/crates/flashblocks/src/execution/mod.rs +++ b/crates/flashblocks/src/execution/mod.rs @@ -1,5 +1,4 @@ -pub(crate) mod processor; -pub(crate) mod validator; +pub(crate) mod worker; use op_alloy_rpc_types_engine::OpFlashblockPayloadBase; use reth_optimism_primitives::OpReceipt; @@ -10,18 +9,6 @@ pub(crate) struct BuildArgs { pub(crate) last_flashblock_index: u64, } -/// State root strategies during flashblocks sequence validation. -#[derive(Debug, Clone, Copy, Default)] -pub(crate) enum StateRootStrategy { - /// Synchronous state root computation - #[default] - Synchronous, - /// Parallel state root computation - Parallel, - /// Sparse trie task - SparseTrieTask, -} - /// Receipt requirements for cache-resume flow. pub trait FlashblockCachedReceipt: Clone { /// Adds `gas_offset` to each receipt's `cumulative_gas_used`. diff --git a/crates/flashblocks/src/execution/processor.rs b/crates/flashblocks/src/execution/processor.rs deleted file mode 100644 index eb0f54eb..00000000 --- a/crates/flashblocks/src/execution/processor.rs +++ /dev/null @@ -1,216 +0,0 @@ -use crate::{execution::StateRootStrategy, FlashblockCachedReceipt}; -use alloy_eips::eip2718::WithEncoded; -use op_alloy_rpc_types_engine::OpFlashblockPayloadBase; - -use reth_errors::RethError; -use reth_evm::{ - execute::{ - BlockAssembler, BlockAssemblerInput, BlockBuilder, BlockBuilderOutcome, BlockExecutor, - }, - ConfigureEvm, Evm, -}; -use reth_execution_types::BlockExecutionResult; -use reth_primitives_traits::{HeaderTy, NodePrimitives, Recovered, RecoveredBlock}; -use reth_revm::{ - cached::CachedReads, - database::StateProviderDatabase, - db::{states::bundle_state::BundleRetention, BundleState, State}, -}; -use reth_storage_api::StateProvider; -use reth_trie_common::HashedPostState; - -/// Data returned from processor to validator for cache commit. -pub(crate) struct ProcessorOutcome { - pub(crate) execution_result: BlockExecutionResult, - pub(crate) block: RecoveredBlock, - pub(crate) hashed_state: HashedPostState, - pub(crate) bundle: BundleState, - pub(crate) read_cache: CachedReads, -} - -/// Data extracted by validator from `PendingSequence`, passed to processor for incremental -/// execution. -pub(crate) struct IncrementalPrestate { - pub(crate) prestate_bundle: BundleState, - pub(crate) cached_tx_count: usize, - pub(crate) cached_receipts: Vec, - pub(crate) cached_gas_used: u64, - pub(crate) cached_blob_gas_used: u64, - pub(crate) cached_reads: CachedReads, -} - -/// Handles transaction execution, state root computation, and block assembly for flashblock -/// sequences. -/// -/// Separated from [`super::validator::FlashblockSequenceValidator`] so that configurable state -/// root strategies (`Synchronous`, `Parallel`, `SparseTrieTask`) live cleanly here, while the -/// validator handles flashblocks-specific cache orchestration. -#[derive(Debug)] -pub(crate) struct FlashblockSequenceProcessor { - evm_config: EvmConfig, - state_root_strategy: StateRootStrategy, - _primitives: std::marker::PhantomData, -} - -impl FlashblockSequenceProcessor -where - N: NodePrimitives, - N::Receipt: FlashblockCachedReceipt, -{ - pub(crate) fn new(evm_config: EvmConfig) -> Self { - Self { - evm_config, - state_root_strategy: StateRootStrategy::default(), - _primitives: std::marker::PhantomData, - } - } -} - -impl FlashblockSequenceProcessor -where - N: NodePrimitives, - N::Receipt: FlashblockCachedReceipt, - EvmConfig: ConfigureEvm + Unpin>, -{ - /// Full flashblocks sequence execution from scratch. - pub(crate) fn process_fresh( - &self, - base: &OpFlashblockPayloadBase, - transactions: &[WithEncoded>], - state_provider: &dyn StateProvider, - parent_header: &reth_primitives_traits::SealedHeader>, - ) -> eyre::Result> { - let mut read_cache = CachedReads::default(); - let cached_db = read_cache.as_db_mut(StateProviderDatabase::new(state_provider)); - let mut state = State::builder().with_database(cached_db).with_bundle_update().build(); - - let mut builder = self - .evm_config - .builder_for_next_block(&mut state, parent_header, base.clone().into()) - .map_err(RethError::other)?; - builder.apply_pre_execution_changes()?; - for tx in transactions { - builder.execute_transaction(tx.clone())?; - } - let BlockBuilderOutcome { execution_result, block, hashed_state, .. } = - builder.finish(state_provider)?; - let bundle = state.take_bundle(); - - Ok(ProcessorOutcome { execution_result, block, hashed_state, bundle, read_cache }) - } - - /// Suffix-only execution reusing cached prefix state from an existing pending sequence. - pub(crate) fn process_incremental( - &self, - base: &OpFlashblockPayloadBase, - transactions: &[WithEncoded>], - state_provider: &dyn StateProvider, - parent_header: &reth_primitives_traits::SealedHeader>, - prestate: IncrementalPrestate, - ) -> eyre::Result> { - let mut read_cache = prestate.cached_reads; - let cached_db = read_cache.as_db_mut(StateProviderDatabase::new(state_provider)); - let mut state = State::builder() - .with_database(cached_db) - .with_bundle_prestate(prestate.prestate_bundle) - .with_bundle_update() - .build(); - - let attrs = base.clone().into(); - let evm_env = - self.evm_config.next_evm_env(parent_header, &attrs).map_err(RethError::other)?; - let execution_ctx = self - .evm_config - .context_for_next_block(parent_header, attrs) - .map_err(RethError::other)?; - - // Skip `apply_pre_execution_changes` (already applied in the original fresh build). - // The only pre-execution effect we need is `set_state_clear_flag`, which configures - // EVM empty-account handling (OP Stack chains activate Spurious Dragon at genesis, so - // this is always true). - state.set_state_clear_flag(true); - let evm = self.evm_config.evm_with_env(&mut state, evm_env.clone()); - let mut executor = self.evm_config.create_executor(evm, execution_ctx.clone()); - - for tx in transactions.iter().skip(prestate.cached_tx_count).cloned() { - executor.execute_transaction(tx)?; - } - - let (evm, execution_result) = executor.finish()?; - let (db, _evm_env) = evm.finish(); - db.merge_transitions(BundleRetention::Reverts); - - let execution_result = merge_cached_block_execution_results::( - prestate.cached_receipts, - prestate.cached_gas_used, - prestate.cached_blob_gas_used, - execution_result, - ); - - // Compute state root - let hashed_state = state_provider.hashed_post_state(&db.bundle_state); - let (state_root, _) = self.compute_state_root(state_provider, &hashed_state)?; - let bundle = db.take_bundle(); - - // Assemble block - let (block_transactions, senders): (Vec<_>, Vec<_>) = - transactions.iter().map(|tx| tx.1.clone().into_parts()).unzip(); - let block = self - .evm_config - .block_assembler() - .assemble_block(BlockAssemblerInput::new( - evm_env, - execution_ctx, - parent_header, - block_transactions, - &execution_result, - &bundle, - state_provider, - state_root, - )) - .map_err(RethError::other)?; - let block = RecoveredBlock::new_unhashed(block, senders); - - Ok(ProcessorOutcome { execution_result, block, hashed_state, bundle, read_cache }) - } - - /// Dispatches state root computation based on the configured [`StateRootStrategy`]. - fn compute_state_root( - &self, - state_provider: &dyn StateProvider, - hashed_state: &HashedPostState, - ) -> eyre::Result<(alloy_primitives::B256, reth_trie_common::updates::TrieUpdates)> { - match self.state_root_strategy { - StateRootStrategy::Synchronous => state_provider - .state_root_with_updates(hashed_state.clone()) - .map_err(|e| eyre::eyre!(e)), - StateRootStrategy::Parallel | StateRootStrategy::SparseTrieTask => { - unimplemented!( - "Parallel and SparseTrieTask state root strategies are not yet implemented" - ) - } - } - } -} - -/// Merges prefix (cached) and suffix execution results into a single -/// [`BlockExecutionResult`]. -fn merge_cached_block_execution_results( - cached_receipts: Vec, - cached_gas_used: u64, - cached_blob_gas_used: u64, - mut execution_result: BlockExecutionResult, -) -> BlockExecutionResult -where - N::Receipt: FlashblockCachedReceipt, -{ - N::Receipt::add_cumulative_gas_offset(&mut execution_result.receipts, cached_gas_used); - let mut receipts = cached_receipts; - receipts.extend(execution_result.receipts); - BlockExecutionResult { - receipts, - requests: execution_result.requests, - gas_used: cached_gas_used.saturating_add(execution_result.gas_used), - blob_gas_used: cached_blob_gas_used.saturating_add(execution_result.blob_gas_used), - } -} diff --git a/crates/flashblocks/src/execution/validator.rs b/crates/flashblocks/src/execution/validator.rs deleted file mode 100644 index 42db3abf..00000000 --- a/crates/flashblocks/src/execution/validator.rs +++ /dev/null @@ -1,292 +0,0 @@ -use crate::{ - cache::{CachedTxInfo, FlashblockStateCache, PendingSequence}, - execution::{ - processor::{FlashblockSequenceProcessor, IncrementalPrestate, ProcessorOutcome}, - BuildArgs, - }, - FlashblockCachedReceipt, -}; -use alloy_eips::eip2718::WithEncoded; -use op_alloy_rpc_types_engine::OpFlashblockPayloadBase; -use std::{ - collections::HashMap, - sync::Arc, - time::{Duration, Instant}, -}; -use tracing::*; - -use reth_chain_state::{ComputedTrieData, ExecutedBlock}; -use reth_evm::ConfigureEvm; -use reth_execution_types::BlockExecutionOutput; -use reth_primitives_traits::{ - transaction::TxHashRef, BlockBody, HeaderTy, NodePrimitives, Recovered, -}; -use reth_rpc_eth_types::PendingBlock; -use reth_storage_api::{HeaderProvider, StateProviderFactory}; - -/// Builds the [`PendingSequence`]s from the accumulated flashblock transaction sequences. -/// Commits results directly to [`FlashblockStateCache`] via `handle_pending_sequence()`. -/// -/// Supports two execution modes: -/// - **Fresh**: Full execution for a new block height. -/// - **Incremental**: Suffix-only execution reusing cached prefix state from an existing -/// pending sequence at the same height. -/// -/// Delegates transaction execution and block assembly to -/// [`FlashblockSequenceProcessor`]. -#[derive(Debug)] -pub(crate) struct FlashblockSequenceValidator -where - N::Receipt: FlashblockCachedReceipt, -{ - /// Handles transaction execution, state root computation, and block assembly. - processor: FlashblockSequenceProcessor, - /// The state provider factory for resolving canonical and historical state. - provider: Provider, - /// The flashblocks state cache containing the flashblocks state cache layer. - flashblocks_state: FlashblockStateCache, -} - -impl FlashblockSequenceValidator -where - N::Receipt: FlashblockCachedReceipt, -{ - pub(crate) fn new( - evm_config: EvmConfig, - provider: Provider, - flashblocks_state: FlashblockStateCache, - ) -> Self { - Self { - processor: FlashblockSequenceProcessor::new(evm_config), - provider, - flashblocks_state, - } - } - - pub(crate) const fn provider(&self) -> &Provider { - &self.provider - } -} - -impl FlashblockSequenceValidator -where - N: NodePrimitives, - N::Receipt: FlashblockCachedReceipt, - EvmConfig: ConfigureEvm + Unpin>, - Provider: StateProviderFactory + HeaderProvider
> + Unpin, -{ - /// Executes a flashblock transaction sequence and commits the result to the flashblocks - /// state cache. Note that the flashblocks sequence validator should be the only handle - /// that advances the flashblocks state cache tip. - /// - /// Determines execution mode from the current pending state: - /// - No pending sequence exists, cache not yet initialized → fresh build. - /// - Pending is at a different height → fresh build. - /// - If pending exists at the same height → incremental build. - pub(crate) fn execute>>>( - &mut self, - args: BuildArgs, - ) -> eyre::Result<()> { - let block_number = args.base.block_number; - let transactions: Vec<_> = args.transactions.into_iter().collect(); - - // Determine execution mode from pending state - let pending = self.flashblocks_state.get_pending_sequence(); - let pending_height = pending.as_ref().map(|p| p.get_height()); - let incremental = pending_height == Some(block_number); - - // Validate height continuity - if let Some(pending_height) = pending_height - && block_number != pending_height - && block_number != pending_height + 1 - { - warn!( - target: "flashblocks", - incoming_height = block_number, - pending_height = pending_height, - "state mismatch from incoming sequence to current pending tip", - ); - return Err(eyre::eyre!( - "state mismatch from incoming sequence to current pending tip" - )); - } - - if incremental { - self.execute_incremental( - args.base, - transactions, - args.last_flashblock_index, - pending.unwrap(), - ) - } else { - self.execute_fresh(args.base, transactions, args.last_flashblock_index) - } - } - - /// Full flashblocks sequence execution from a new block height. - /// - /// Resolves the state provider and parent header, then delegates execution to the - /// processor. - fn execute_fresh( - &self, - base: OpFlashblockPayloadBase, - transactions: Vec>>, - last_flashblock_index: u64, - ) -> eyre::Result<()> { - let parent_hash = base.parent_hash; - - // Prioritize trying to get parent hash state from canonical provider first. If - // the parent is not in the canonical chain, then try building fresh on top of - // the current pending sequence (current pending promoted to confirm, incoming - // sequence is the next height). Fall back to the flashblocks overlay via - // `get_pending_state_provider`. - let (state_provider, parent_header) = - match self.provider.history_by_block_hash(parent_hash) { - Ok(canon_provider) => { - let header = - self.provider.sealed_header_by_hash(parent_hash)?.ok_or_else(|| { - eyre::eyre!("parent header not found for hash {parent_hash}") - })?; - (canon_provider, header) - } - Err(err) => { - trace!( - target: "flashblocks", - error = %err, - "parent not in canonical chain, try getting state from pending state", - ); - let canonical_state = self.provider.latest()?; - self.flashblocks_state.get_pending_state_provider(canonical_state).ok_or_else( - || { - eyre::eyre!( - "parent {parent_hash} not in canonical chain and no \ - pending state available for overlay" - ) - }, - )? - } - }; - - let outcome = self.processor.process_fresh( - &base, - &transactions, - state_provider.as_ref(), - &parent_header, - )?; - - self.commit_pending_sequence(base, transactions, last_flashblock_index, outcome) - } - - /// Incremental execution for the same block height as the current pending. Reuses - /// the pending sequence's `BundleState` as prestate and its warm `CachedReads`, - /// executing only new unexecuted transactions from incremental flashblock payloads. - fn execute_incremental( - &self, - base: OpFlashblockPayloadBase, - transactions: Vec>>, - last_flashblock_index: u64, - pending: PendingSequence, - ) -> eyre::Result<()> { - if pending.last_flashblock_index != last_flashblock_index { - warn!( - target: "flashblocks", - incoming_last_flashblock_index = last_flashblock_index, - pending_last_flashblock_index = pending.last_flashblock_index, - "state mismatch from incoming sequence to current pending tip", - ); - return Err(eyre::eyre!( - "state mismatch, last flashblock index mismatch pending index" - )); - } - - // Get latest canonical state, then overlay flashblocks state cache blocks - // from canonical height up to the parent hash. - let parent_hash = base.parent_hash; - let canonical_state = self.provider.latest()?; - let (state_provider, parent_header) = self - .flashblocks_state - .get_state_provider_by_hash(parent_hash, canonical_state) - .ok_or_else(|| { - eyre::eyre!("failed to build overlay state provider for parent {parent_hash}") - })?; - - // Extract prestate from current pending - let exec_output = &pending.pending.executed_block.execution_output; - let prestate = IncrementalPrestate { - prestate_bundle: exec_output.state.clone(), - cached_tx_count: pending - .pending - .executed_block - .recovered_block - .body() - .transaction_count(), - cached_receipts: exec_output.result.receipts.clone(), - cached_gas_used: exec_output.result.gas_used, - cached_blob_gas_used: exec_output.result.blob_gas_used, - cached_reads: pending.cached_reads, - }; - - let outcome = self.processor.process_incremental( - &base, - &transactions, - state_provider.as_ref(), - &parent_header, - prestate, - )?; - - self.commit_pending_sequence(base, transactions, last_flashblock_index, outcome) - } - - /// Builds a [`PendingSequence`] from a [`ProcessorOutcome`] and commits it to the - /// flashblocks state cache. - fn commit_pending_sequence( - &self, - base: OpFlashblockPayloadBase, - transactions: Vec>>, - last_flashblock_index: u64, - outcome: ProcessorOutcome, - ) -> eyre::Result<()> { - let block_hash = outcome.block.hash(); - let parent_hash = base.parent_hash; - - let execution_outcome = Arc::new(BlockExecutionOutput { - state: outcome.bundle, - result: outcome.execution_result, - }); - let executed_block = ExecutedBlock::new( - outcome.block.into(), - execution_outcome.clone(), - ComputedTrieData::without_trie_input( - Arc::new(outcome.hashed_state.into_sorted()), - Arc::default(), - ), - ); - let pending_block = PendingBlock::with_executed_block( - Instant::now() + Duration::from_secs(1), - executed_block, - ); - - // Build tx index - let mut tx_index = HashMap::with_capacity(transactions.len()); - for (idx, tx) in transactions.iter().enumerate() { - tx_index.insert( - *tx.tx_hash(), - CachedTxInfo { - block_number: base.block_number, - block_hash, - tx_index: idx as u64, - tx: tx.1.clone().into_inner(), - receipt: execution_outcome.result.receipts[idx].clone(), - }, - ); - } - self.flashblocks_state.handle_pending_sequence(PendingSequence { - pending: pending_block, - tx_index, - cached_reads: outcome.read_cache, - block_hash, - parent_hash, - last_flashblock_index, - }) - } -} diff --git a/crates/flashblocks/src/execution/worker.rs b/crates/flashblocks/src/execution/worker.rs new file mode 100644 index 00000000..5f7d9e05 --- /dev/null +++ b/crates/flashblocks/src/execution/worker.rs @@ -0,0 +1,396 @@ +use crate::{ + cache::{CachedTxInfo, FlashblockStateCache, PendingSequence}, + execution::BuildArgs, + FlashblockCachedReceipt, +}; +use alloy_eips::eip2718::WithEncoded; +use op_alloy_rpc_types_engine::OpFlashblockPayloadBase; +use std::{ + collections::HashMap, + sync::Arc, + time::{Duration, Instant}, +}; +use tracing::*; + +use reth_chain_state::{ComputedTrieData, ExecutedBlock}; +use reth_errors::RethError; +use reth_evm::{ + execute::{ + BlockAssembler, BlockAssemblerInput, BlockBuilder, BlockBuilderOutcome, BlockExecutor, + }, + ConfigureEvm, Evm, +}; +use reth_execution_types::{BlockExecutionOutput, BlockExecutionResult}; +use reth_primitives_traits::{ + transaction::TxHashRef, BlockBody, HeaderTy, NodePrimitives, Recovered, RecoveredBlock, +}; +use reth_revm::{ + cached::CachedReads, + database::StateProviderDatabase, + db::{states::bundle_state::BundleRetention, BundleState, State}, +}; +use reth_rpc_eth_types::PendingBlock; +use reth_storage_api::{ + HashedPostStateProvider, HeaderProvider, StateProviderFactory, StateRootProvider, +}; +use reth_trie_common::HashedPostState; + +/// Builds the [`PendingSequence`]s from the accumulated flashblock transaction sequences. +/// Commits results directly to [`FlashblockStateCache`] via `handle_pending_sequence()`. +/// +/// Supports two execution modes: +/// - **Fresh**: Full execution for a new block height. +/// - **Incremental**: Suffix-only execution reusing cached prefix state from an existing +/// pending sequence at the same height. +#[derive(Debug)] +pub(crate) struct FlashblockSequenceValidator +where + N::Receipt: FlashblockCachedReceipt, +{ + /// The EVM configuration used to build the flashblocks. + evm_config: EvmConfig, + /// The canonical chainstate provider. + provider: Provider, + /// The flashblocks state cache containing the flashblocks state cache layer. + flashblocks_state: FlashblockStateCache, +} + +impl FlashblockSequenceValidator +where + N::Receipt: FlashblockCachedReceipt, +{ + pub(crate) fn new( + evm_config: EvmConfig, + provider: Provider, + flashblocks_state: FlashblockStateCache, + ) -> Self { + Self { evm_config, provider, flashblocks_state } + } + + pub(crate) const fn provider(&self) -> &Provider { + &self.provider + } +} + +impl FlashblockSequenceValidator +where + N: NodePrimitives, + N::Receipt: FlashblockCachedReceipt, + EvmConfig: ConfigureEvm + Unpin>, + Provider: StateProviderFactory + HeaderProvider
> + Unpin, +{ + /// Executes a flashblock transaction sequence and commits the result to the flashblocks + /// state cache. Note that the flashblocks sequence validator should be the only handle + /// that advances the flashblocks state cache tip. + /// + /// Determines execution mode from the current pending state: + /// - No pending sequence exists, cache not yet initialized → fresh build. + /// - Pending is at a different height → fresh build. + /// - If pending exists at the same height → incremental build. + pub(crate) fn execute>>>( + &mut self, + args: BuildArgs, + ) -> eyre::Result<()> { + let block_number = args.base.block_number; + let transactions: Vec<_> = args.transactions.into_iter().collect(); + + // Determine execution mode from pending state + let pending = self.flashblocks_state.get_pending_sequence(); + let pending_height = pending.as_ref().map(|p| p.get_height()); + let incremental = pending_height == Some(block_number); + + // Validate height continuity + if let Some(pending_height) = pending_height + && block_number != pending_height + && block_number != pending_height + 1 + { + // State cache is polluted + warn!( + target: "flashblocks", + incoming_height = block_number, + pending_height = pending_height, + "state mismatch from incoming sequence to current pending tip", + ); + return Err(eyre::eyre!( + "state mismatch from incoming sequence to current pending tip" + )); + } + + if incremental { + self.execute_incremental( + args.base, + transactions, + args.last_flashblock_index, + pending.unwrap(), + ) + } else { + self.execute_fresh(args.base, transactions, args.last_flashblock_index) + } + } + + /// Full flashblocks sequence execution from a new block height. + fn execute_fresh( + &self, + base: OpFlashblockPayloadBase, + transactions: Vec>>, + last_flashblock_index: u64, + ) -> eyre::Result<()> { + let parent_hash = base.parent_hash; + + // Prioritize trying to get parent hash state from canonical provider first. If + // the parent is not in the canonical chain, then try building fresh on top of + // the current pending sequence (current pending promoted to confirm, incoming + // sequence is the next height). Fall back to the flashblocks overlay via + // `get_pending_state_provider`. + let (state_provider, parent_header) = match self.provider.history_by_block_hash(parent_hash) + { + Ok(canon_provider) => { + let header = self + .provider + .sealed_header_by_hash(parent_hash)? + .ok_or_else(|| eyre::eyre!("parent header not found for hash {parent_hash}"))?; + (canon_provider, header) + } + Err(err) => { + trace!( + target: "flashblocks", + error = %err, + "parent not in canonical chain, try getting state from pending state", + ); + let canonical_state = self.provider.latest()?; + self.flashblocks_state.get_pending_state_provider(canonical_state).ok_or_else( + || { + eyre::eyre!( + "parent {parent_hash} not in canonical chain and no \ + pending state available for overlay" + ) + }, + )? + } + }; + + let mut request_cache = CachedReads::default(); + let cached_db = + request_cache.as_db_mut(StateProviderDatabase::new(state_provider.as_ref())); + let mut state = State::builder().with_database(cached_db).with_bundle_update().build(); + + let mut builder = self + .evm_config + .builder_for_next_block(&mut state, &parent_header, base.clone().into()) + .map_err(RethError::other)?; + builder.apply_pre_execution_changes()?; + for tx in &transactions { + builder.execute_transaction(tx.clone())?; + } + let BlockBuilderOutcome { execution_result, block, hashed_state, .. } = + builder.finish(state_provider.as_ref())?; + let bundle = state.take_bundle(); + + self.commit_pending_sequence( + base, + transactions, + last_flashblock_index, + execution_result, + block, + hashed_state, + bundle, + request_cache, + ) + } + + /// Incremental execution for the same block height as the current pending. Reuses + /// the pending sequence's `BundleState` as prestate and its warm `CachedReads`, + /// executing only new unexecuted transactions from incremental flashblock payloads. + fn execute_incremental( + &self, + base: OpFlashblockPayloadBase, + transactions: Vec>>, + last_flashblock_index: u64, + pending: PendingSequence, + ) -> eyre::Result<()> { + if pending.last_flashblock_index != last_flashblock_index { + // State cache is polluted + warn!( + target: "flashblocks", + incoming_last_flashblock_index = last_flashblock_index, + pending_last_flashblock_index = pending.last_flashblock_index, + "state mismatch from incoming sequence to current pending tip", + ); + return Err(eyre::eyre!( + "state mismatch, last flashblock index mismatch pending index" + )); + } + + // Get latest canonical state, then overlay flashblocks state cache blocks + // from canonical height up to the parent hash. This handles the case where + // the parent is a flashblocks-confirmed block ahead of canonical. + let parent_hash = base.parent_hash; + let canonical_state = self.provider.latest()?; + let (state_provider, parent_header) = self + .flashblocks_state + .get_state_provider_by_hash(parent_hash, canonical_state) + .ok_or_else(|| { + eyre::eyre!("failed to build overlay state provider for parent {parent_hash}") + })?; + + // Extract prestate from current pending + let exec_output = &pending.pending.executed_block.execution_output; + let prestate_bundle = exec_output.state.clone(); + let cached_tx_count = + pending.pending.executed_block.recovered_block.body().transaction_count(); + let cached_receipts = exec_output.result.receipts.clone(); + let cached_gas_used = exec_output.result.gas_used; + let cached_blob_gas_used = exec_output.result.blob_gas_used; + + // Set up state DB with pending's warm CachedReads + prestate bundle + let mut request_cache = pending.cached_reads; + let cached_db = + request_cache.as_db_mut(StateProviderDatabase::new(state_provider.as_ref())); + let mut state = State::builder() + .with_database(cached_db) + .with_bundle_prestate(prestate_bundle) + .with_bundle_update() + .build(); + + let attrs = base.clone().into(); + let evm_env = + self.evm_config.next_evm_env(&parent_header, &attrs).map_err(RethError::other)?; + let execution_ctx = self + .evm_config + .context_for_next_block(&parent_header, attrs) + .map_err(RethError::other)?; + + // Skip apply_pre_execution_changes (already applied in the original fresh build). + // The only pre-execution effect we need is set_state_clear_flag, which configures EVM + // empty-account handling (OP Stack chains activate Spurious Dragon at genesis, so + // this is always true). + state.set_state_clear_flag(true); + let evm = self.evm_config.evm_with_env(&mut state, evm_env); + let mut executor = self.evm_config.create_executor(evm, execution_ctx.clone()); + + for tx in transactions.iter().skip(cached_tx_count).cloned() { + executor.execute_transaction(tx)?; + } + + let (evm, execution_result) = executor.finish()?; + let (db, evm_env) = evm.finish(); + db.merge_transitions(BundleRetention::Reverts); + + let execution_result = Self::merge_cached_block_execution_results( + cached_receipts, + cached_gas_used, + cached_blob_gas_used, + execution_result, + ); + + // Compute state root via sparse trie + let hashed_state = state_provider.hashed_post_state(&db.bundle_state); + let (state_root, _) = state_provider + .state_root_with_updates(hashed_state.clone()) + .map_err(RethError::other)?; + let bundle = db.take_bundle(); + + // Assemble block + let (block_transactions, senders): (Vec<_>, Vec<_>) = + transactions.iter().map(|tx| tx.1.clone().into_parts()).unzip(); + let block = self + .evm_config + .block_assembler() + .assemble_block(BlockAssemblerInput::new( + evm_env, + execution_ctx, + &parent_header, + block_transactions, + &execution_result, + &bundle, + state_provider.as_ref(), + state_root, + )) + .map_err(RethError::other)?; + let block = RecoveredBlock::new_unhashed(block, senders); + + self.commit_pending_sequence( + base, + transactions, + last_flashblock_index, + execution_result, + block, + hashed_state, + bundle, + request_cache, + ) + } + + /// Builds a [`PendingSequence`] and commits it to the flashblocks state cache. + #[expect(clippy::too_many_arguments)] + fn commit_pending_sequence( + &self, + base: OpFlashblockPayloadBase, + transactions: Vec>>, + last_flashblock_index: u64, + execution_result: BlockExecutionResult, + block: RecoveredBlock, + hashed_state: HashedPostState, + bundle: BundleState, + request_cache: CachedReads, + ) -> eyre::Result<()> { + let block_hash = block.hash(); + let parent_hash = base.parent_hash; + + // Build pending execution block + let execution_outcome = + Arc::new(BlockExecutionOutput { state: bundle, result: execution_result }); + let executed_block = ExecutedBlock::new( + block.into(), + execution_outcome.clone(), + ComputedTrieData::without_trie_input( + Arc::new(hashed_state.into_sorted()), + Arc::default(), + ), + ); + let pending_block = PendingBlock::with_executed_block( + Instant::now() + Duration::from_secs(1), + executed_block, + ); + + // Build tx index + let mut tx_index = HashMap::with_capacity(transactions.len()); + for (idx, tx) in transactions.iter().enumerate() { + tx_index.insert( + *tx.tx_hash(), + CachedTxInfo { + block_number: base.block_number, + block_hash, + tx_index: idx as u64, + tx: tx.1.clone().into_inner(), + receipt: execution_outcome.result.receipts[idx].clone(), + }, + ); + } + self.flashblocks_state.handle_pending_sequence(PendingSequence { + pending: pending_block, + tx_index, + cached_reads: request_cache, + block_hash, + parent_hash, + last_flashblock_index, + }) + } + + fn merge_cached_block_execution_results( + cached_receipts: Vec, + cached_gas_used: u64, + cached_blob_gas_used: u64, + mut execution_result: BlockExecutionResult, + ) -> BlockExecutionResult { + N::Receipt::add_cumulative_gas_offset(&mut execution_result.receipts, cached_gas_used); + let mut receipts = cached_receipts; + receipts.extend(execution_result.receipts); + BlockExecutionResult { + receipts, + requests: execution_result.requests, + gas_used: cached_gas_used.saturating_add(execution_result.gas_used), + blob_gas_used: cached_blob_gas_used.saturating_add(execution_result.blob_gas_used), + } + } +} diff --git a/crates/rpc/Cargo.toml b/crates/rpc/Cargo.toml index febf835f..a7610790 100644 --- a/crates/rpc/Cargo.toml +++ b/crates/rpc/Cargo.toml @@ -34,7 +34,6 @@ alloy-serde.workspace = true # op op-alloy-network.workspace = true -op-alloy-rpc-types.workspace = true # rpc jsonrpsee.workspace = true From 56910dbfd1ab1dd80ef89578255b6367855ac50b Mon Sep 17 00:00:00 2001 From: Niven Date: Wed, 18 Mar 2026 07:58:01 +0800 Subject: [PATCH 40/76] Revert "feat: revamp flashblocks execution logic, use sync SR calc first" This reverts commit 079ecb7279281461973f4615357067a3213c4157. --- Cargo.lock | 13 +- Cargo.toml | 2 +- crates/flashblocks/Cargo.toml | 1 - crates/flashblocks/src/cache/confirm.rs | 44 +- crates/flashblocks/src/cache/mod.rs | 22 +- crates/flashblocks/src/cache/pending.rs | 16 + crates/flashblocks/src/execution/mod.rs | 3 +- crates/flashblocks/src/execution/worker.rs | 833 +++++++++++++-------- crates/flashblocks/src/lib.rs | 2 +- 9 files changed, 586 insertions(+), 350 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 7be4e612..499fe18f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8073,7 +8073,7 @@ dependencies = [ "reth-node-api", "reth-primitives-traits", "reth-tracing", - "ringbuffer", + "ringbuffer 0.16.0", "serde", "serde_json", "tokio", @@ -9554,7 +9554,7 @@ dependencies = [ "reth-rpc-eth-types", "reth-storage-api", "reth-tasks", - "ringbuffer", + "ringbuffer 0.16.0", "serde_json", "tokio", "tokio-tungstenite 0.28.0", @@ -11028,6 +11028,12 @@ dependencies = [ "windows-sys 0.52.0", ] +[[package]] +name = "ringbuffer" +version = "0.15.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3df6368f71f205ff9c33c076d170dd56ebf68e8161c733c0caa07a7a5509ed53" + [[package]] name = "ringbuffer" version = "0.16.0" @@ -14215,8 +14221,7 @@ dependencies = [ "reth-storage-api", "reth-tasks", "reth-tracing", - "reth-trie-common", - "ringbuffer", + "ringbuffer 0.15.0", "serde", "serde_json", "test-case", diff --git a/Cargo.toml b/Cargo.toml index 09f3bcf4..08af2518 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -242,7 +242,7 @@ tracing = { version = "0.1.41" } shellexpand = "3.1" url = "2.5" brotli = "8.0" -ringbuffer = "=0.16.0" +ringbuffer = "0.15" # p2p libp2p = { version = "0.56", features = ["identify", "ping", "noise", "tcp", "autonat", "mdns", "tokio", "cbor", "macros", "yamux", "dns"] } diff --git a/crates/flashblocks/Cargo.toml b/crates/flashblocks/Cargo.toml index 7eb7569b..3c47a756 100644 --- a/crates/flashblocks/Cargo.toml +++ b/crates/flashblocks/Cargo.toml @@ -33,7 +33,6 @@ reth-rpc-eth-types.workspace = true reth-rpc-server-types.workspace = true reth-optimism-flashblocks.workspace = true reth-storage-api.workspace = true -reth-trie-common.workspace = true reth-tasks = { workspace = true, features = ["rayon"] } reth-tracing.workspace = true diff --git a/crates/flashblocks/src/cache/confirm.rs b/crates/flashblocks/src/cache/confirm.rs index f99a1f83..512f9525 100644 --- a/crates/flashblocks/src/cache/confirm.rs +++ b/crates/flashblocks/src/cache/confirm.rs @@ -16,17 +16,17 @@ const DEFAULT_CONFIRM_BLOCK_CACHE_SIZE: usize = 1_000; const DEFAULT_TX_CACHE_SIZE: usize = DEFAULT_CONFIRM_BLOCK_CACHE_SIZE * 10_000; #[derive(Debug)] -pub(crate) struct ConfirmedBlock { +pub struct ConfirmedBlock { /// The locally built pending block with execution output. - pub(crate) executed_block: ExecutedBlock, + pub executed_block: ExecutedBlock, /// The receipts for the pending block - pub(crate) receipts: Arc>>, + pub receipts: Arc>>, } impl ConfirmedBlock { /// Returns a pair of [`RecoveredBlock`] and a vector of [`NodePrimitives::Receipt`]s by /// cloning from borrowed self. - pub(crate) fn to_block_and_receipts(&self) -> BlockAndReceipts { + pub fn to_block_and_receipts(&self) -> BlockAndReceipts { BlockAndReceipts { block: self.executed_block.recovered_block.clone(), receipts: self.receipts.clone(), @@ -45,7 +45,7 @@ impl ConfirmedBlock { /// Transaction data is stored in a `HashMap` which indexes transaction hashes to /// [`CachedTxInfo`] for O(1) tx/receipt lookups. #[derive(Debug)] -pub(crate) struct ConfirmCache { +pub struct ConfirmCache { /// Primary storage: block number → (block hash, block + receipts). /// `BTreeMap` ordering enables efficient range-based flush via `split_off`. blocks: BTreeMap)>, @@ -63,7 +63,7 @@ impl Default for ConfirmCache { impl ConfirmCache { /// Creates a new [`ConfirmCache`]. - pub(crate) fn new() -> Self { + pub fn new() -> Self { Self { blocks: BTreeMap::new(), hash_to_number: HashMap::with_capacity(DEFAULT_CONFIRM_BLOCK_CACHE_SIZE), @@ -72,17 +72,17 @@ impl ConfirmCache { } /// Returns the number of cached entries. - pub(crate) fn len(&self) -> usize { + pub fn len(&self) -> usize { self.blocks.len() } /// Returns `true` if the cache is empty. - pub(crate) fn is_empty(&self) -> bool { + pub fn is_empty(&self) -> bool { self.blocks.is_empty() } /// Inserts a confirmed block into the cache, indexed by block number and block hash. - pub(crate) fn insert( + pub fn insert( &mut self, height: u64, executed_block: ExecutedBlock, @@ -118,37 +118,34 @@ impl ConfirmCache { } /// Clears all entries. - pub(crate) fn clear(&mut self) { + pub fn clear(&mut self) { self.tx_index.clear(); self.blocks.clear(); self.hash_to_number.clear(); } /// Returns the block number for the given block hash, if cached. - pub(crate) fn number_for_hash(&self, block_hash: &B256) -> Option { + pub fn number_for_hash(&self, block_hash: &B256) -> Option { self.hash_to_number.get(block_hash).copied() } /// Returns the block hash for the given block number, if cached. - pub(crate) fn hash_for_number(&self, block_number: u64) -> Option { + pub fn hash_for_number(&self, block_number: u64) -> Option { self.blocks.get(&block_number).map(|(hash, _)| *hash) } /// Returns the confirmed block for the given block hash, if present. - pub(crate) fn get_block_by_hash(&self, block_hash: &B256) -> Option> { + pub fn get_block_by_hash(&self, block_hash: &B256) -> Option> { self.get_block_by_number(self.number_for_hash(block_hash)?) } /// Returns the confirmed block for the given block number, if present. - pub(crate) fn get_block_by_number(&self, block_number: u64) -> Option> { + pub fn get_block_by_number(&self, block_number: u64) -> Option> { self.blocks.get(&block_number).map(|(_, entry)| entry.to_block_and_receipts()) } /// Returns the cached transaction info for the given tx hash, if present. - pub(crate) fn get_tx_info( - &self, - tx_hash: &TxHash, - ) -> Option<(CachedTxInfo, BlockAndReceipts)> { + pub fn get_tx_info(&self, tx_hash: &TxHash) -> Option<(CachedTxInfo, BlockAndReceipts)> { let tx_info = self.tx_index.get(tx_hash).cloned()?; let block = self.get_block_by_number(tx_info.block_number)?; Some((tx_info, block)) @@ -158,7 +155,7 @@ impl ConfirmCache { /// ordered newest to oldest (for use with `MemoryOverlayStateProvider`). /// /// Returns an error if state cache pollution detected (non-contiguous blocks). - pub(crate) fn get_executed_blocks_up_to_height( + pub fn get_executed_blocks_up_to_height( &self, target_height: u64, canon_height: u64, @@ -193,10 +190,7 @@ impl ConfirmCache { } /// Removes and returns the confirmed block for the given block number. - pub(crate) fn remove_block_by_number( - &mut self, - block_number: u64, - ) -> Option> { + pub fn remove_block_by_number(&mut self, block_number: u64) -> Option> { let (hash, block) = self.blocks.remove(&block_number)?; self.hash_to_number.remove(&hash); self.remove_tx_index_for_block(&block); @@ -204,7 +198,7 @@ impl ConfirmCache { } /// Removes and returns the confirmed block for the given block hash. - pub(crate) fn remove_block_by_hash(&mut self, block_hash: &B256) -> Option> { + pub fn remove_block_by_hash(&mut self, block_hash: &B256) -> Option> { let number = self.hash_to_number.remove(block_hash)?; let (_, block) = self.blocks.remove(&number)?; self.remove_tx_index_for_block(&block); @@ -222,7 +216,7 @@ impl ConfirmCache { /// /// Called when the canonical chain catches up to the confirmed cache. Returns /// the number of entries flushed. - pub(crate) fn flush_up_to_height(&mut self, canon_height: u64) -> usize { + pub fn flush_up_to_height(&mut self, canon_height: u64) -> usize { let retained = self.blocks.split_off(&(canon_height + 1)); let stale = std::mem::replace(&mut self.blocks, retained); let count = stale.len(); diff --git a/crates/flashblocks/src/cache/mod.rs b/crates/flashblocks/src/cache/mod.rs index 6683ae72..17b99eb7 100644 --- a/crates/flashblocks/src/cache/mod.rs +++ b/crates/flashblocks/src/cache/mod.rs @@ -3,10 +3,9 @@ pub mod pending; pub(crate) mod raw; pub(crate) mod utils; -pub(crate) use confirm::ConfirmCache; -pub(crate) use raw::RawFlashblocksCache; - +pub use confirm::ConfirmCache; pub use pending::PendingSequence; +pub use raw::RawFlashblocksCache; use crate::{FlashblockCachedReceipt, PendingSequenceRx}; use parking_lot::RwLock; @@ -87,11 +86,6 @@ where self.inner.read().pending_cache.as_ref().map(|p| p.get_height()) } - /// Returns a clone of the current pending sequence, if any. - pub fn get_pending_sequence(&self) -> Option> { - self.inner.read().pending_cache.clone() - } - pub fn get_rpc_block_by_id(&self, block_id: Option) -> Option> { match block_id.unwrap_or(BlockId::Number(BlockNumberOrTag::Latest)) { BlockId::Number(id) => self.get_rpc_block(id), @@ -169,18 +163,6 @@ where let in_memory = guard.get_state_provider_at_height(block_num, canonical_state)?; Some((in_memory, block.clone_sealed_header())) } - - pub fn get_state_provider_by_hash( - &self, - block_hash: B256, - canonical_state: StateProviderBox, - ) -> Option<(StateProviderBox, SealedHeaderFor)> { - let mut guard = self.inner.write(); - let block = guard.get_block_by_hash(&block_hash)?.block; - let block_num = block.number(); - let in_memory = guard.get_state_provider_at_height(block_num, canonical_state)?; - Some((in_memory, block.clone_sealed_header())) - } } // FlashblockStateCache state mutation interfaces. diff --git a/crates/flashblocks/src/cache/pending.rs b/crates/flashblocks/src/cache/pending.rs index 43408424..27d49d00 100644 --- a/crates/flashblocks/src/cache/pending.rs +++ b/crates/flashblocks/src/cache/pending.rs @@ -28,6 +28,14 @@ where pub parent_hash: B256, /// The last flashblock index of the latest flashblocks sequence. pub last_flashblock_index: u64, + /// Cached number of transactions covered by the pending sequence execution. + cached_tx_count: usize, + /// Cached receipts for the prefix. + pub cached_receipts: Vec, + /// Total gas used by the pending sequence. + pub cached_gas_used: u64, + /// Total blob/DA gas used by the pending sequence. + pub cached_blob_gas_used: u64, } impl PendingSequence @@ -81,6 +89,10 @@ mod tests { block_hash, parent_hash, last_flashblock_index: 0, + cached_tx_count: 0, + cached_receipts: vec![], + cached_gas_used: 0, + cached_blob_gas_used: 0, } } @@ -115,6 +127,10 @@ mod tests { block_hash, parent_hash, last_flashblock_index: 0, + cached_tx_count: 0, + cached_receipts: vec![], + cached_gas_used: 0, + cached_blob_gas_used: 0, } } diff --git a/crates/flashblocks/src/execution/mod.rs b/crates/flashblocks/src/execution/mod.rs index 89adac12..70904c40 100644 --- a/crates/flashblocks/src/execution/mod.rs +++ b/crates/flashblocks/src/execution/mod.rs @@ -10,7 +10,7 @@ pub(crate) struct BuildArgs { } /// Receipt requirements for cache-resume flow. -pub trait FlashblockCachedReceipt: Clone { +pub(crate) trait FlashblockCachedReceipt: Clone { /// Adds `gas_offset` to each receipt's `cumulative_gas_used`. fn add_cumulative_gas_offset(receipts: &mut [Self], gas_offset: u64); } @@ -20,6 +20,7 @@ impl FlashblockCachedReceipt for OpReceipt { if gas_offset == 0 { return; } + for receipt in receipts { let inner = receipt.as_receipt_mut(); inner.cumulative_gas_used = inner.cumulative_gas_used.saturating_add(gas_offset); diff --git a/crates/flashblocks/src/execution/worker.rs b/crates/flashblocks/src/execution/worker.rs index 5f7d9e05..973bc419 100644 --- a/crates/flashblocks/src/execution/worker.rs +++ b/crates/flashblocks/src/execution/worker.rs @@ -1,16 +1,16 @@ use crate::{ - cache::{CachedTxInfo, FlashblockStateCache, PendingSequence}, - execution::BuildArgs, - FlashblockCachedReceipt, + cache::{FlashblockStateCache, PendingSequence}, + BuildArgs, FlashblockCachedReceipt, }; -use alloy_eips::eip2718::WithEncoded; -use op_alloy_rpc_types_engine::OpFlashblockPayloadBase; use std::{ - collections::HashMap, sync::Arc, time::{Duration, Instant}, }; -use tracing::*; +use tracing::trace; + +use alloy_eips::{eip2718::WithEncoded, BlockNumberOrTag}; +use alloy_primitives::B256; +use op_alloy_rpc_types_engine::OpFlashblockPayloadBase; use reth_chain_state::{ComputedTrieData, ExecutedBlock}; use reth_errors::RethError; @@ -22,26 +22,22 @@ use reth_evm::{ }; use reth_execution_types::{BlockExecutionOutput, BlockExecutionResult}; use reth_primitives_traits::{ - transaction::TxHashRef, BlockBody, HeaderTy, NodePrimitives, Recovered, RecoveredBlock, + transaction::TxHashRef, AlloyBlockHeader, BlockTy, HeaderTy, NodePrimitives, ReceiptTy, + Recovered, RecoveredBlock, SealedHeader, }; use reth_revm::{ - cached::CachedReads, database::StateProviderDatabase, db::{states::bundle_state::BundleRetention, BundleState, State}, }; -use reth_rpc_eth_types::PendingBlock; +use reth_rpc_eth_types::{EthApiError, PendingBlock}; use reth_storage_api::{ - HashedPostStateProvider, HeaderProvider, StateProviderFactory, StateRootProvider, + noop::NoopProvider, BlockReaderIdExt, HashedPostStateProvider, StateProviderFactory, + StateRootProvider, }; -use reth_trie_common::HashedPostState; -/// Builds the [`PendingSequence`]s from the accumulated flashblock transaction sequences. -/// Commits results directly to [`FlashblockStateCache`] via `handle_pending_sequence()`. +/// The `FlashblocksValidator` builds [`PendingBlock`] out of a sequence of transactions. /// -/// Supports two execution modes: -/// - **Fresh**: Full execution for a new block height. -/// - **Incremental**: Suffix-only execution reusing cached prefix state from an existing -/// pending sequence at the same height. +/// Owns a [`TransactionCache`] for incremental prefix caching between flashblock builds. #[derive(Debug)] pub(crate) struct FlashblockSequenceValidator where @@ -52,6 +48,7 @@ where /// The canonical chainstate provider. provider: Provider, /// The flashblocks state cache containing the flashblocks state cache layer. + /// state cache layer. flashblocks_state: FlashblockStateCache, } @@ -77,320 +74,562 @@ where N: NodePrimitives, N::Receipt: FlashblockCachedReceipt, EvmConfig: ConfigureEvm + Unpin>, - Provider: StateProviderFactory + HeaderProvider
> + Unpin, + Provider: StateProviderFactory + + BlockReaderIdExt< + Header = HeaderTy, + Block = BlockTy, + Transaction = N::SignedTx, + Receipt = ReceiptTy, + > + Unpin, { - /// Executes a flashblock transaction sequence and commits the result to the flashblocks - /// state cache. Note that the flashblocks sequence validator should be the only handle - /// that advances the flashblocks state cache tip. + /// Returns the [`PendingSequence`], which contains the full built execution state of + /// the flashblocks sequence passed in `BuildArgs`. + /// + /// The /// - /// Determines execution mode from the current pending state: - /// - No pending sequence exists, cache not yet initialized → fresh build. - /// - Pending is at a different height → fresh build. - /// - If pending exists at the same height → incremental build. + /// In canonical mode, the internal transaction cache is used to resume from + /// cached state if the transaction list is a continuation of what was previously + /// executed. + /// + /// Returns `None` if: + /// - In canonical mode: flashblock doesn't attach to the latest header + /// - In speculative mode: no pending parent state provided pub(crate) fn execute>>>( &mut self, - args: BuildArgs, + mut args: BuildArgs, ) -> eyre::Result<()> { - let block_number = args.base.block_number; - let transactions: Vec<_> = args.transactions.into_iter().collect(); + trace!(target: "flashblocks", "Attempting new pending block from flashblocks"); - // Determine execution mode from pending state - let pending = self.flashblocks_state.get_pending_sequence(); - let pending_height = pending.as_ref().map(|p| p.get_height()); - let incremental = pending_height == Some(block_number); + let parent_hash = args.base.parent_hash; + let parent_header = self.state_cache.latest_header(parent_hash)?; + let state_provider = self.state_cache.history_by_block_hash(parent_header.hash())?; - // Validate height continuity - if let Some(pending_height) = pending_height - && block_number != pending_height - && block_number != pending_height + 1 - { - // State cache is polluted - warn!( + let latest = self + .provider + .latest_header()? + .ok_or(EthApiError::HeaderNotFound(BlockNumberOrTag::Latest.into()))?; + let latest_hash = latest.hash(); + + // Determine build mode: canonical (parent is local tip) or speculative (parent is pending) + let is_canonical = args.base.parent_hash == latest_hash; + let has_pending_parent = args.pending_parent.is_some(); + + if !is_canonical && !has_pending_parent { + trace!( target: "flashblocks", - incoming_height = block_number, - pending_height = pending_height, - "state mismatch from incoming sequence to current pending tip", + flashblock_parent = ?args.base.parent_hash, + local_latest = ?latest.num_hash(), + "Skipping non-consecutive flashblock (no pending parent available)" ); - return Err(eyre::eyre!( - "state mismatch from incoming sequence to current pending tip" - )); + return Ok(None); } - if incremental { - self.execute_incremental( - args.base, - transactions, - args.last_flashblock_index, - pending.unwrap(), + // Collect transactions and extract hashes for cache lookup + let transactions: Vec<_> = args.transactions.into_iter().collect(); + let tx_hashes: Vec = transactions.iter().map(|tx| *tx.tx_hash()).collect(); + + // Get state provider and parent header context. + // For speculative builds, use the canonical anchor hash (not the pending parent hash) + // for storage reads, but execute with the pending parent's sealed header context. + let (state_provider, canonical_anchor, parent_header) = if is_canonical { + (self.provider.history_by_block_hash(latest.hash())?, latest.hash(), &latest) + } else { + // For speculative building, we need to use the canonical anchor + // and apply the pending state's bundle on top of it + let pending = args.pending_parent.as_ref().unwrap(); + let Some(parent_header) = pending.sealed_header.as_ref() else { + trace!( + target: "flashblocks", + pending_block_number = pending.block_number, + pending_block_hash = ?pending.block_hash, + "Skipping speculative build: pending parent header is unavailable" + ); + return Ok(None); + }; + if !is_consistent_speculative_parent_hashes( + args.base.parent_hash, + pending.block_hash, + parent_header.hash(), + ) { + trace!( + target: "flashblocks", + incoming_parent_hash = ?args.base.parent_hash, + pending_block_hash = ?pending.block_hash, + pending_sealed_hash = ?parent_header.hash(), + pending_block_number = pending.block_number, + "Skipping speculative build: inconsistent pending parent hashes" + ); + return Ok(None); + } + trace!( + target: "flashblocks", + pending_block_number = pending.block_number, + pending_block_hash = ?pending.block_hash, + canonical_anchor = ?pending.canonical_anchor_hash, + "Building speculatively on pending state" + ); + ( + self.provider.history_by_block_hash(pending.canonical_anchor_hash)?, + pending.canonical_anchor_hash, + parent_header, ) + }; + + // Set up cached reads + let cache_key = if is_canonical { latest_hash } else { args.base.parent_hash }; + let mut request_cache = args + .cached_state + .take() + .filter(|(hash, _)| hash == &cache_key) + .map(|(_, state)| state) + .unwrap_or_else(|| { + // For speculative builds, use cached reads from pending parent + args.pending_parent.as_ref().map(|p| p.cached_reads.clone()).unwrap_or_default() + }); + + let cached_db = request_cache.as_db_mut(StateProviderDatabase::new(&state_provider)); + + // Check for resumable canonical execution state. + let canonical_parent_hash = args.base.parent_hash; + let cached_prefix = if is_canonical { + self.tx_cache + .get_resumable_state_with_execution_meta_for_parent( + args.base.block_number, + canonical_parent_hash, + &tx_hashes, + ) + .map(|(bundle, receipts, _requests, gas_used, blob_gas_used, cached_tx_count)| { + trace!( + target: "flashblocks", + cached_tx_count, + total_txs = tx_hashes.len(), + "Cache hit (executing only uncached suffix)" + ); + CachedPrefixExecutionResult { + cached_tx_count, + bundle: bundle.clone(), + receipts: receipts.to_vec(), + gas_used, + blob_gas_used, + } + }) } else { - self.execute_fresh(args.base, transactions, args.last_flashblock_index) - } - } + None + }; - /// Full flashblocks sequence execution from a new block height. - fn execute_fresh( - &self, - base: OpFlashblockPayloadBase, - transactions: Vec>>, - last_flashblock_index: u64, - ) -> eyre::Result<()> { - let parent_hash = base.parent_hash; - - // Prioritize trying to get parent hash state from canonical provider first. If - // the parent is not in the canonical chain, then try building fresh on top of - // the current pending sequence (current pending promoted to confirm, incoming - // sequence is the next height). Fall back to the flashblocks overlay via - // `get_pending_state_provider`. - let (state_provider, parent_header) = match self.provider.history_by_block_hash(parent_hash) + // Build state with appropriate prestate + // - Speculative builds use pending parent prestate + // - Canonical cache-hit builds use cached prefix prestate + let mut state = if let Some(ref pending) = args.pending_parent { + State::builder() + .with_database(cached_db) + .with_bundle_prestate(pending.execution_outcome.state.clone()) + .with_bundle_update() + .build() + } else if let Some(ref cached_prefix) = cached_prefix { + State::builder() + .with_database(cached_db) + .with_bundle_prestate(cached_prefix.bundle.clone()) + .with_bundle_update() + .build() + } else { + State::builder().with_database(cached_db).with_bundle_update().build() + }; + + let (execution_result, block, hashed_state, bundle) = if let Some(cached_prefix) = + cached_prefix { - Ok(canon_provider) => { - let header = self - .provider - .sealed_header_by_hash(parent_hash)? - .ok_or_else(|| eyre::eyre!("parent header not found for hash {parent_hash}"))?; - (canon_provider, header) + // Cached prefix execution model: + // - The cached bundle prestate already includes pre-execution state changes + // (blockhash/beacon root updates, create2deployer), so we do NOT call + // apply_pre_execution_changes() again. + // - The only pre-execution effect we need is set_state_clear_flag, which configures EVM + // empty-account handling (OP Stack chains activate Spurious Dragon at genesis, so + // this is always true). + // - Suffix transactions execute against the warm prestate. + // - Post-execution (finish()) runs once on the suffix executor, producing correct + // results for the full block. For OP Stack post-merge, the + // post_block_balance_increments are empty (no block rewards, no ommers, no + // withdrawals passed), so finish() only seals execution state. + let attrs = args.base.clone().into(); + let evm_env = + self.evm_config.next_evm_env(parent_header, &attrs).map_err(RethError::other)?; + let execution_ctx = self + .evm_config + .context_for_next_block(parent_header, attrs) + .map_err(RethError::other)?; + + // The cached bundle prestate already includes pre-execution state changes. + // Only set the state clear flag (Spurious Dragon empty-account handling). + state.set_state_clear_flag(true); + let evm = self.evm_config.evm_with_env(&mut state, evm_env); + let mut executor = self.evm_config.create_executor(evm, execution_ctx.clone()); + + for tx in transactions.iter().skip(cached_prefix.cached_tx_count).cloned() { + let _gas_used = executor.execute_transaction(tx)?; } - Err(err) => { - trace!( - target: "flashblocks", - error = %err, - "parent not in canonical chain, try getting state from pending state", - ); - let canonical_state = self.provider.latest()?; - self.flashblocks_state.get_pending_state_provider(canonical_state).ok_or_else( - || { - eyre::eyre!( - "parent {parent_hash} not in canonical chain and no \ - pending state available for overlay" - ) - }, - )? + + let (evm, suffix_execution_result) = executor.finish()?; + let (db, evm_env) = evm.finish(); + db.merge_transitions(BundleRetention::Reverts); + + let execution_result = + Self::merge_cached_and_suffix_results(cached_prefix, suffix_execution_result); + + let (hashed_state, state_root) = if args.compute_state_root { + trace!(target: "flashblocks", "Computing block state root"); + let hashed_state = state_provider.hashed_post_state(&db.bundle_state); + let (state_root, _) = state_provider + .state_root_with_updates(hashed_state.clone()) + .map_err(RethError::other)?; + (hashed_state, state_root) + } else { + let noop_provider = NoopProvider::default(); + let hashed_state = noop_provider.hashed_post_state(&db.bundle_state); + let (state_root, _) = noop_provider + .state_root_with_updates(hashed_state.clone()) + .map_err(RethError::other)?; + (hashed_state, state_root) + }; + let bundle = db.take_bundle(); + + let (block_transactions, senders): (Vec<_>, Vec<_>) = + transactions.iter().map(|tx| tx.1.clone().into_parts()).unzip(); + let block = self + .evm_config + .block_assembler() + .assemble_block(BlockAssemblerInput::new( + evm_env, + execution_ctx, + parent_header, + block_transactions, + &execution_result, + &bundle, + &state_provider, + state_root, + )) + .map_err(RethError::other)?; + let block = RecoveredBlock::new_unhashed(block, senders); + + (execution_result, block, hashed_state, bundle) + } else { + let mut builder = self + .evm_config + .builder_for_next_block(&mut state, parent_header, args.base.clone().into()) + .map_err(RethError::other)?; + + builder.apply_pre_execution_changes()?; + + for tx in transactions { + let _gas_used = builder.execute_transaction(tx)?; } + + let BlockBuilderOutcome { execution_result, block, hashed_state, .. } = + if args.compute_state_root { + trace!(target: "flashblocks", "Computing block state root"); + builder.finish(&state_provider)? + } else { + builder.finish(NoopProvider::default())? + }; + let bundle = state.take_bundle(); + + (execution_result, block, hashed_state, bundle) }; - let mut request_cache = CachedReads::default(); - let cached_db = - request_cache.as_db_mut(StateProviderDatabase::new(state_provider.as_ref())); - let mut state = State::builder().with_database(cached_db).with_bundle_update().build(); - - let mut builder = self - .evm_config - .builder_for_next_block(&mut state, &parent_header, base.clone().into()) - .map_err(RethError::other)?; - builder.apply_pre_execution_changes()?; - for tx in &transactions { - builder.execute_transaction(tx.clone())?; + // Update internal transaction cache (only in canonical mode) + if is_canonical { + self.tx_cache.update_with_execution_meta_for_parent( + args.base.block_number, + canonical_parent_hash, + tx_hashes, + bundle.clone(), + execution_result.receipts.clone(), + CachedExecutionMeta { + requests: execution_result.requests.clone(), + gas_used: execution_result.gas_used, + blob_gas_used: execution_result.blob_gas_used, + }, + ); } - let BlockBuilderOutcome { execution_result, block, hashed_state, .. } = - builder.finish(state_provider.as_ref())?; - let bundle = state.take_bundle(); - - self.commit_pending_sequence( - base, - transactions, - last_flashblock_index, - execution_result, - block, - hashed_state, - bundle, - request_cache, - ) - } - /// Incremental execution for the same block height as the current pending. Reuses - /// the pending sequence's `BundleState` as prestate and its warm `CachedReads`, - /// executing only new unexecuted transactions from incremental flashblock payloads. - fn execute_incremental( - &self, - base: OpFlashblockPayloadBase, - transactions: Vec>>, - last_flashblock_index: u64, - pending: PendingSequence, - ) -> eyre::Result<()> { - if pending.last_flashblock_index != last_flashblock_index { - // State cache is polluted - warn!( + let execution_outcome = BlockExecutionOutput { state: bundle, result: execution_result }; + let execution_outcome = Arc::new(execution_outcome); + + // Create pending state for subsequent builds. + // Use the locally built block hash for both parent matching and speculative + // execution context to avoid split-hash ambiguity. + let local_block_hash = block.hash(); + if local_block_hash != args.last_flashblock_hash { + trace!( target: "flashblocks", - incoming_last_flashblock_index = last_flashblock_index, - pending_last_flashblock_index = pending.last_flashblock_index, - "state mismatch from incoming sequence to current pending tip", + local_block_hash = ?local_block_hash, + sequencer_block_hash = ?args.last_flashblock_hash, + block_number = block.number(), + "Local block hash differs from sequencer-provided hash; speculative chaining will follow local hash" ); - return Err(eyre::eyre!( - "state mismatch, last flashblock index mismatch pending index" - )); - } - - // Get latest canonical state, then overlay flashblocks state cache blocks - // from canonical height up to the parent hash. This handles the case where - // the parent is a flashblocks-confirmed block ahead of canonical. - let parent_hash = base.parent_hash; - let canonical_state = self.provider.latest()?; - let (state_provider, parent_header) = self - .flashblocks_state - .get_state_provider_by_hash(parent_hash, canonical_state) - .ok_or_else(|| { - eyre::eyre!("failed to build overlay state provider for parent {parent_hash}") - })?; - - // Extract prestate from current pending - let exec_output = &pending.pending.executed_block.execution_output; - let prestate_bundle = exec_output.state.clone(); - let cached_tx_count = - pending.pending.executed_block.recovered_block.body().transaction_count(); - let cached_receipts = exec_output.result.receipts.clone(); - let cached_gas_used = exec_output.result.gas_used; - let cached_blob_gas_used = exec_output.result.blob_gas_used; - - // Set up state DB with pending's warm CachedReads + prestate bundle - let mut request_cache = pending.cached_reads; - let cached_db = - request_cache.as_db_mut(StateProviderDatabase::new(state_provider.as_ref())); - let mut state = State::builder() - .with_database(cached_db) - .with_bundle_prestate(prestate_bundle) - .with_bundle_update() - .build(); - - let attrs = base.clone().into(); - let evm_env = - self.evm_config.next_evm_env(&parent_header, &attrs).map_err(RethError::other)?; - let execution_ctx = self - .evm_config - .context_for_next_block(&parent_header, attrs) - .map_err(RethError::other)?; - - // Skip apply_pre_execution_changes (already applied in the original fresh build). - // The only pre-execution effect we need is set_state_clear_flag, which configures EVM - // empty-account handling (OP Stack chains activate Spurious Dragon at genesis, so - // this is always true). - state.set_state_clear_flag(true); - let evm = self.evm_config.evm_with_env(&mut state, evm_env); - let mut executor = self.evm_config.create_executor(evm, execution_ctx.clone()); - - for tx in transactions.iter().skip(cached_tx_count).cloned() { - executor.execute_transaction(tx)?; } - - let (evm, execution_result) = executor.finish()?; - let (db, evm_env) = evm.finish(); - db.merge_transitions(BundleRetention::Reverts); - - let execution_result = Self::merge_cached_block_execution_results( - cached_receipts, - cached_gas_used, - cached_blob_gas_used, - execution_result, - ); - - // Compute state root via sparse trie - let hashed_state = state_provider.hashed_post_state(&db.bundle_state); - let (state_root, _) = state_provider - .state_root_with_updates(hashed_state.clone()) - .map_err(RethError::other)?; - let bundle = db.take_bundle(); - - // Assemble block - let (block_transactions, senders): (Vec<_>, Vec<_>) = - transactions.iter().map(|tx| tx.1.clone().into_parts()).unzip(); - let block = self - .evm_config - .block_assembler() - .assemble_block(BlockAssemblerInput::new( - evm_env, - execution_ctx, - &parent_header, - block_transactions, - &execution_result, - &bundle, - state_provider.as_ref(), - state_root, - )) - .map_err(RethError::other)?; - let block = RecoveredBlock::new_unhashed(block, senders); - - self.commit_pending_sequence( - base, - transactions, - last_flashblock_index, - execution_result, - block, - hashed_state, - bundle, - request_cache, + let sealed_header = SealedHeader::new(block.header().clone(), local_block_hash); + let pending_state = PendingBlockState::new( + local_block_hash, + block.number(), + args.base.parent_hash, + canonical_anchor, + execution_outcome.clone(), + request_cache.clone(), ) - } + .with_sealed_header(sealed_header); - /// Builds a [`PendingSequence`] and commits it to the flashblocks state cache. - #[expect(clippy::too_many_arguments)] - fn commit_pending_sequence( - &self, - base: OpFlashblockPayloadBase, - transactions: Vec>>, - last_flashblock_index: u64, - execution_result: BlockExecutionResult, - block: RecoveredBlock, - hashed_state: HashedPostState, - bundle: BundleState, - request_cache: CachedReads, - ) -> eyre::Result<()> { - let block_hash = block.hash(); - let parent_hash = base.parent_hash; - - // Build pending execution block - let execution_outcome = - Arc::new(BlockExecutionOutput { state: bundle, result: execution_result }); - let executed_block = ExecutedBlock::new( - block.into(), - execution_outcome.clone(), - ComputedTrieData::without_trie_input( - Arc::new(hashed_state.into_sorted()), - Arc::default(), - ), - ); let pending_block = PendingBlock::with_executed_block( Instant::now() + Duration::from_secs(1), - executed_block, + ExecutedBlock::new( + block.into(), + execution_outcome, + ComputedTrieData::without_trie_input( + Arc::new(hashed_state.into_sorted()), + Arc::default(), + ), + ), + ); + let pending_flashblock = PendingFlashBlock::new( + pending_block, + canonical_anchor, + args.last_flashblock_index, + args.last_flashblock_hash, + args.compute_state_root, ); - // Build tx index - let mut tx_index = HashMap::with_capacity(transactions.len()); - for (idx, tx) in transactions.iter().enumerate() { - tx_index.insert( - *tx.tx_hash(), - CachedTxInfo { - block_number: base.block_number, - block_hash, - tx_index: idx as u64, - tx: tx.1.clone().into_inner(), - receipt: execution_outcome.result.receipts[idx].clone(), - }, - ); - } - self.flashblocks_state.handle_pending_sequence(PendingSequence { - pending: pending_block, - tx_index, - cached_reads: request_cache, - block_hash, - parent_hash, - last_flashblock_index, - }) + Ok(Some(BuildResult { pending_flashblock, cached_reads: request_cache, pending_state })) } - fn merge_cached_block_execution_results( - cached_receipts: Vec, - cached_gas_used: u64, - cached_blob_gas_used: u64, - mut execution_result: BlockExecutionResult, + fn merge_cached_and_suffix_results( + cached_prefix: CachedPrefixExecutionResult, + mut suffix_result: BlockExecutionResult, ) -> BlockExecutionResult { - N::Receipt::add_cumulative_gas_offset(&mut execution_result.receipts, cached_gas_used); - let mut receipts = cached_receipts; - receipts.extend(execution_result.receipts); + N::Receipt::add_cumulative_gas_offset(&mut suffix_result.receipts, cached_prefix.gas_used); + + let mut receipts = cached_prefix.receipts; + receipts.extend(suffix_result.receipts); + + // Use only suffix requests: the suffix executor's finish() produces + // post-execution requests from the complete block state (cached prestate + + // suffix changes). The cached prefix requests came from an intermediate + // state and must not be merged. + let requests = suffix_result.requests; + BlockExecutionResult { receipts, - requests: execution_result.requests, - gas_used: cached_gas_used.saturating_add(execution_result.gas_used), - blob_gas_used: cached_blob_gas_used.saturating_add(execution_result.blob_gas_used), + requests, + gas_used: cached_prefix.gas_used.saturating_add(suffix_result.gas_used), + blob_gas_used: cached_prefix.blob_gas_used.saturating_add(suffix_result.blob_gas_used), } } } + +#[inline] +fn is_consistent_speculative_parent_hashes( + incoming_parent_hash: B256, + pending_block_hash: B256, + pending_sealed_hash: B256, +) -> bool { + incoming_parent_hash == pending_block_hash && pending_block_hash == pending_sealed_hash +} + +// #[cfg(test)] +// mod tests { +// use super::{is_consistent_speculative_parent_hashes, BuildArgs, FlashBlockBuilder}; +// use crate::execution::cache::CachedExecutionMeta; +// use alloy_consensus::{SignableTransaction, TxEip1559}; +// use alloy_eips::eip2718::Encodable2718; +// use alloy_network::TxSignerSync; +// use alloy_primitives::{Address, StorageKey, StorageValue, TxKind, B256, U256}; +// use alloy_signer_local::PrivateKeySigner; +// use op_alloy_rpc_types_engine::OpFlashblockPayloadBase; +// use op_revm::constants::L1_BLOCK_CONTRACT; +// use reth_optimism_chainspec::OP_MAINNET; +// use reth_optimism_evm::OpEvmConfig; +// use reth_optimism_primitives::{OpBlock, OpPrimitives, OpTransactionSigned}; +// use reth_primitives_traits::{AlloyBlockHeader, Recovered, SignerRecoverable}; +// use reth_provider::test_utils::{ExtendedAccount, MockEthProvider}; +// use reth_provider::ChainSpecProvider; +// use reth_storage_api::BlockReaderIdExt; +// use std::str::FromStr; + +// fn signed_transfer_tx( +// signer: &PrivateKeySigner, +// nonce: u64, +// recipient: Address, +// ) -> OpTransactionSigned { +// let mut tx = TxEip1559 { +// chain_id: 10, // OP Mainnet chain id +// nonce, +// gas_limit: 100_000, +// max_priority_fee_per_gas: 1_000_000_000, +// max_fee_per_gas: 2_000_000_000, +// to: TxKind::Call(recipient), +// value: U256::from(1), +// ..Default::default() +// }; +// let signature = signer.sign_transaction_sync(&mut tx).expect("signing tx succeeds"); +// tx.into_signed(signature).into() +// } + +// fn into_encoded_recovered( +// tx: OpTransactionSigned, +// signer: Address, +// ) -> alloy_eips::eip2718::WithEncoded> { +// let encoded = tx.encoded_2718(); +// Recovered::new_unchecked(tx, signer).into_encoded_with(encoded) +// } + +// #[test] +// fn speculative_parent_hashes_must_all_match() { +// let h = B256::repeat_byte(0x11); +// assert!(is_consistent_speculative_parent_hashes(h, h, h)); +// } + +// #[test] +// fn speculative_parent_hashes_reject_any_mismatch() { +// let incoming = B256::repeat_byte(0x11); +// let pending = B256::repeat_byte(0x22); +// let sealed = B256::repeat_byte(0x33); + +// assert!(!is_consistent_speculative_parent_hashes(incoming, pending, sealed)); +// assert!(!is_consistent_speculative_parent_hashes(incoming, incoming, sealed)); +// assert!(!is_consistent_speculative_parent_hashes(incoming, pending, pending)); +// } + +// #[test] +// fn canonical_build_reuses_cached_prefix_execution() { +// let provider = MockEthProvider::::new().with_chain_spec(OP_MAINNET.clone()); +// let genesis_hash = provider.chain_spec().genesis_hash(); +// let genesis_block = +// OpBlock::new(provider.chain_spec().genesis_header().clone(), Default::default()); +// provider.add_block(genesis_hash, genesis_block); + +// let recipient = Address::repeat_byte(0x22); +// let signer = PrivateKeySigner::random(); +// let tx_a = signed_transfer_tx(&signer, 0, recipient); +// let tx_b = signed_transfer_tx(&signer, 1, recipient); +// let tx_c = signed_transfer_tx(&signer, 2, recipient); +// let signer = tx_a.recover_signer().expect("tx signer recovery succeeds"); + +// provider.add_account(signer, ExtendedAccount::new(0, U256::from(1_000_000_000_000_000u64))); +// provider.add_account(recipient, ExtendedAccount::new(0, U256::ZERO)); +// provider.add_account( +// L1_BLOCK_CONTRACT, +// ExtendedAccount::new(1, U256::ZERO).extend_storage([ +// (StorageKey::with_last_byte(1), StorageValue::from(1_000_000_000u64)), +// (StorageKey::with_last_byte(5), StorageValue::from(188u64)), +// (StorageKey::with_last_byte(6), StorageValue::from(684_000u64)), +// ( +// StorageKey::with_last_byte(3), +// StorageValue::from_str( +// "0x0000000000000000000000000000000000001db0000d27300000000000000005", +// ) +// .expect("valid L1 fee scalar storage value"), +// ), +// ]), +// ); + +// let latest = provider +// .latest_header() +// .expect("provider latest header query succeeds") +// .expect("genesis header exists"); + +// let base = OpFlashblockPayloadBase { +// parent_hash: latest.hash(), +// parent_beacon_block_root: B256::ZERO, +// fee_recipient: Address::ZERO, +// prev_randao: B256::repeat_byte(0x55), +// block_number: latest.number() + 1, +// gas_limit: 30_000_000, +// timestamp: latest.timestamp() + 2, +// extra_data: Default::default(), +// base_fee_per_gas: U256::from(1_000_000_000u64), +// }; +// let base_parent_hash = base.parent_hash; + +// let tx_a_hash = B256::from(*tx_a.tx_hash()); +// let tx_b_hash = B256::from(*tx_b.tx_hash()); +// let tx_c_hash = B256::from(*tx_c.tx_hash()); + +// let tx_a = into_encoded_recovered(tx_a, signer); +// let tx_b = into_encoded_recovered(tx_b, signer); +// let tx_c = into_encoded_recovered(tx_c, signer); + +// let evm_config = OpEvmConfig::optimism(OP_MAINNET.clone()); +// let mut builder = FlashBlockBuilder::::new(evm_config, provider); + +// let first = builder +// .execute(BuildArgs { +// base: base.clone(), +// transactions: vec![tx_a.clone(), tx_b.clone()], +// cached_state: None, +// last_flashblock_index: 0, +// last_flashblock_hash: B256::repeat_byte(0xA0), +// compute_state_root: false, +// pending_parent: None, +// }) +// .expect("first build succeeds") +// .expect("first build is canonical"); + +// assert_eq!(first.pending_state.execution_outcome.result.receipts.len(), 2); + +// let cached_hashes = vec![tx_a_hash, tx_b_hash]; +// let (bundle, receipts, requests, gas_used, blob_gas_used, skip) = builder +// .tx_cache +// .get_resumable_state_with_execution_meta_for_parent( +// base.block_number, +// base_parent_hash, +// &cached_hashes, +// ) +// .expect("cache should contain first build execution state"); +// assert_eq!(skip, 2); + +// let mut tampered_receipts = receipts.to_vec(); +// tampered_receipts[0].as_receipt_mut().cumulative_gas_used = +// tampered_receipts[0].as_receipt().cumulative_gas_used.saturating_add(17); +// let expected_tampered_gas = tampered_receipts[0].as_receipt().cumulative_gas_used; + +// builder.tx_cache.update_with_execution_meta_for_parent( +// base.block_number, +// base_parent_hash, +// cached_hashes, +// bundle.clone(), +// tampered_receipts, +// CachedExecutionMeta { requests: requests.clone(), gas_used, blob_gas_used }, +// ); + +// let second_hashes = vec![tx_a_hash, tx_b_hash, tx_c_hash]; +// let (_, _, _, _, _, skip) = builder +// .tx_cache +// .get_resumable_state_with_execution_meta_for_parent( +// base.block_number, +// base_parent_hash, +// &second_hashes, +// ) +// .expect("second tx list should extend cached prefix"); +// assert_eq!(skip, 2); + +// let second = builder +// .execute(BuildArgs { +// base, +// transactions: vec![tx_a, tx_b, tx_c], +// cached_state: None, +// last_flashblock_index: 1, +// last_flashblock_hash: B256::repeat_byte(0xA1), +// compute_state_root: false, +// pending_parent: None, +// }) +// .expect("second build succeeds") +// .expect("second build is canonical"); + +// let receipts = &second.pending_state.execution_outcome.result.receipts; +// assert_eq!(receipts.len(), 3); +// assert_eq!(receipts[0].as_receipt().cumulative_gas_used, expected_tampered_gas); +// assert!( +// receipts[2].as_receipt().cumulative_gas_used +// > receipts[1].as_receipt().cumulative_gas_used +// ); +// } +// } diff --git a/crates/flashblocks/src/lib.rs b/crates/flashblocks/src/lib.rs index e2189344..034656f7 100644 --- a/crates/flashblocks/src/lib.rs +++ b/crates/flashblocks/src/lib.rs @@ -11,7 +11,7 @@ mod ws; mod test_utils; pub use cache::{CachedTxInfo, FlashblockStateCache, PendingSequence}; -pub use execution::FlashblockCachedReceipt; +pub(crate) use execution::{BuildArgs, FlashblockCachedReceipt}; pub use service::FlashblocksRpcService; pub use subscription::FlashblocksPubSub; pub use ws::WsFlashBlockStream; From 51c9b265f645cb29e8d0a62a2b7ab1cebf290fd1 Mon Sep 17 00:00:00 2001 From: Niven Date: Wed, 18 Mar 2026 07:58:11 +0800 Subject: [PATCH 41/76] Revert "refactor(flashblocks): remove execution cache, inline types into execution mod" This reverts commit 707f5400acee128b00106a474dd77001150cf55b. --- crates/flashblocks/src/cache/confirm.rs | 12 +- crates/flashblocks/src/execution/cache.rs | 675 +++++++++++++++++++++ crates/flashblocks/src/execution/mod.rs | 32 +- crates/flashblocks/src/execution/worker.rs | 477 ++++++++------- crates/flashblocks/src/lib.rs | 2 +- 5 files changed, 946 insertions(+), 252 deletions(-) create mode 100644 crates/flashblocks/src/execution/cache.rs diff --git a/crates/flashblocks/src/cache/confirm.rs b/crates/flashblocks/src/cache/confirm.rs index 512f9525..ddbd1883 100644 --- a/crates/flashblocks/src/cache/confirm.rs +++ b/crates/flashblocks/src/cache/confirm.rs @@ -520,7 +520,7 @@ mod tests { let (block, receipts) = make_executed_block_with_txs(1, B256::ZERO, 0, 3); let block_hash = block.recovered_block.hash(); let tx_hashes: Vec<_> = - block.recovered_block.body().transactions().map(|tx| tx.tx_hash()).collect(); + block.recovered_block.body().transactions().map(|tx| *tx.tx_hash()).collect(); cache.insert(1, block, receipts).expect("insert"); for (i, tx_hash) in tx_hashes.iter().enumerate() { @@ -537,7 +537,7 @@ mod tests { let mut cache = ConfirmCache::::new(); let (block, receipts) = make_executed_block_with_txs(1, B256::ZERO, 0, 2); let tx_hashes: Vec<_> = - block.recovered_block.body().transactions().map(|tx| tx.tx_hash()).collect(); + block.recovered_block.body().transactions().map(|tx| *tx.tx_hash()).collect(); cache.insert(1, block, receipts).expect("insert"); cache.flush_up_to_height(1); @@ -551,7 +551,7 @@ mod tests { let mut cache = ConfirmCache::::new(); let (block, receipts) = make_executed_block_with_txs(5, B256::ZERO, 0, 2); let tx_hashes: Vec<_> = - block.recovered_block.body().transactions().map(|tx| tx.tx_hash()).collect(); + block.recovered_block.body().transactions().map(|tx| *tx.tx_hash()).collect(); cache.insert(5, block, receipts).expect("insert"); cache.remove_block_by_number(5); @@ -586,13 +586,13 @@ mod tests { let mut cache = ConfirmCache::::new(); let (block1, receipts1) = make_executed_block_with_txs(1, B256::ZERO, 0, 2); let tx_hashes_1: Vec<_> = - block1.recovered_block.body().transactions().map(|tx| tx.tx_hash()).collect(); + block1.recovered_block.body().transactions().map(|tx| *tx.tx_hash()).collect(); let parent = block1.recovered_block.hash(); cache.insert(1, block1, receipts1).expect("insert 1"); let (block2, receipts2) = make_executed_block_with_txs(2, parent, 100, 2); let tx_hashes_2: Vec<_> = - block2.recovered_block.body().transactions().map(|tx| tx.tx_hash()).collect(); + block2.recovered_block.body().transactions().map(|tx| *tx.tx_hash()).collect(); cache.insert(2, block2, receipts2).expect("insert 2"); cache.flush_up_to_height(1); @@ -609,7 +609,7 @@ mod tests { let mut cache = ConfirmCache::::new(); let (block, receipts) = make_executed_block_with_txs(1, B256::ZERO, 0, 2); let tx_hashes: Vec<_> = - block.recovered_block.body().transactions().map(|tx| tx.tx_hash()).collect(); + block.recovered_block.body().transactions().map(|tx| *tx.tx_hash()).collect(); cache.insert(1, block, receipts).expect("insert"); cache.clear(); diff --git a/crates/flashblocks/src/execution/cache.rs b/crates/flashblocks/src/execution/cache.rs new file mode 100644 index 00000000..4c2efa92 --- /dev/null +++ b/crates/flashblocks/src/execution/cache.rs @@ -0,0 +1,675 @@ +//! Execution caching for flashblock building. +//! +//! When flashblocks arrive incrementally, each new flashblock triggers a rebuild of pending +//! state from all transactions in the sequence. To ensure that the incoming flashblocks +//! are incrementally re-built, from their sequence, the execution cache stores the cumulative +//! bundle state from previous executions. This ensures that states are not re-read from disk +//! for accounts/storage that were already loaded in previous builds. +//! +//! # Approach +//! +//! This module caches the cumulative bundle state from previous executions. When the next +//! flashblock arrives, if its transaction list is a continuation of the cached list, the +//! cached bundle can be used as a **prestate** for the State builder. This avoids redundant +//! disk reads for accounts/storage that were already modified. +//! +//! The cache stores: +//! - Ordered list of executed transaction hashes (for prefix matching) +//! - Cumulative bundle state after all cached transactions (used as prestate) +//! - Cumulative receipts for all cached transactions (for future optimization) +//! - Block-level execution metadata for cached transactions (gas/requests) +//! +//! # Example +//! +//! ```text +//! Flashblock 0: txs [A, B] +//! -> Execute A, B from scratch (cold state reads) +//! -> Cache: txs=[A,B], bundle=state_after_AB +//! +//! Flashblock 1: txs [A, B, C] +//! -> Prefix [A, B] matches cache +//! -> Use cached bundle as prestate (warm state) +//! -> Execute A, B, C (A, B hit prestate cache, faster) +//! -> Cache: txs=[A,B,C], bundle=state_after_ABC +//! +//! Flashblock 2 (reorg): txs [A, D, E] +//! -> Prefix [A] matches, but tx[1]=D != B +//! -> Cached prestate may be partially useful, but diverges +//! -> Execute A, D, E +//! ``` + +use alloy_eips::eip7685::Requests; +use alloy_primitives::B256; +use reth_primitives_traits::NodePrimitives; +use reth_revm::db::BundleState; + +/// Cached block-level execution metadata for the stored transaction prefix. +#[derive(Debug, Clone, Default, PartialEq, Eq)] +pub(crate) struct CachedExecutionMeta { + /// EIP-7685 requests emitted while executing the cached prefix. + pub requests: Requests, + /// Total gas used by the cached prefix. + pub gas_used: u64, + /// Total blob/DA gas used by the cached prefix. + pub blob_gas_used: u64, +} + +/// Resumable cached state plus execution metadata for the cached prefix. +pub(crate) type ResumableState<'a, N> = + (&'a BundleState, &'a [::Receipt], &'a Requests, u64, u64, usize); + +/// Cache of transaction execution results for a single block. +/// +/// Stores cumulative execution state that can be used as a prestate to avoid +/// redundant disk reads when re-executing transactions. The cached bundle provides +/// warm state for accounts/storage already loaded, improving execution performance. +/// +/// **Note**: This cache does NOT skip transaction execution - all transactions must +/// still be executed to populate the block body. The cache only optimizes state reads. +/// +/// The cache is invalidated when: +/// - A new block starts (different block number) +/// - Parent hash changes for parent-scoped lookups +/// - A reorg is detected (transaction list diverges from cached prefix) +/// - Explicitly cleared +#[derive(Debug)] +pub struct TransactionCache { + /// Block number this cache is valid for. + block_number: u64, + /// Parent hash this cache is valid for. + cached_parent_hash: Option, + /// Ordered list of transaction hashes that have been executed. + executed_tx_hashes: Vec, + /// Cumulative bundle state after executing all cached transactions. + cumulative_bundle: BundleState, + /// Receipts for all cached transactions, in execution order. + receipts: Vec, + /// Cached block-level execution metadata. + execution_meta: CachedExecutionMeta, +} + +impl Default for TransactionCache { + fn default() -> Self { + Self::new() + } +} + +impl TransactionCache { + /// Creates a new empty transaction cache. + pub fn new() -> Self { + Self { + block_number: 0, + cached_parent_hash: None, + executed_tx_hashes: Vec::new(), + cumulative_bundle: BundleState::default(), + receipts: Vec::new(), + execution_meta: CachedExecutionMeta::default(), + } + } + + /// Creates a new cache for a specific block number. + pub fn for_block(block_number: u64) -> Self { + Self { block_number, ..Self::new() } + } + + /// Returns the block number this cache is valid for. + pub const fn block_number(&self) -> u64 { + self.block_number + } + + /// Returns the parent hash this cache is valid for, if tracked. + pub const fn parent_hash(&self) -> Option { + self.cached_parent_hash + } + + /// Checks if this cache is valid for the given block number. + pub const fn is_valid_for_block(&self, block_number: u64) -> bool { + self.block_number == block_number + } + + /// Checks if this cache is valid for the given block number and parent hash. + pub fn is_valid_for_block_parent(&self, block_number: u64, parent_hash: B256) -> bool { + self.block_number == block_number && self.cached_parent_hash == Some(parent_hash) + } + + /// Returns the number of cached transactions. + pub const fn len(&self) -> usize { + self.executed_tx_hashes.len() + } + + /// Returns true if the cache is empty. + pub const fn is_empty(&self) -> bool { + self.executed_tx_hashes.is_empty() + } + + /// Returns the cached transaction hashes. + pub fn executed_tx_hashes(&self) -> &[B256] { + &self.executed_tx_hashes + } + + /// Returns the cached receipts. + pub fn receipts(&self) -> &[N::Receipt] { + &self.receipts + } + + /// Returns the cumulative bundle state. + pub const fn bundle(&self) -> &BundleState { + &self.cumulative_bundle + } + + /// Clears the cache. + pub fn clear(&mut self) { + self.executed_tx_hashes.clear(); + self.cumulative_bundle = BundleState::default(); + self.receipts.clear(); + self.execution_meta = CachedExecutionMeta::default(); + self.block_number = 0; + self.cached_parent_hash = None; + } + + /// Updates the cache for a new block, clearing if the block number changed. + /// + /// Returns true if the cache was cleared. + pub fn update_for_block(&mut self, block_number: u64) -> bool { + if self.block_number == block_number { + false + } else { + self.clear(); + self.block_number = block_number; + true + } + } + + /// Computes the length of the matching prefix between cached transactions + /// and the provided transaction hashes. + /// + /// Returns the number of transactions that can be skipped because they + /// match the cached execution results. + pub fn matching_prefix_len(&self, tx_hashes: &[B256]) -> usize { + self.executed_tx_hashes + .iter() + .zip(tx_hashes.iter()) + .take_while(|(cached, incoming)| cached == incoming) + .count() + } + + /// Returns cached state for resuming execution if the incoming transactions have a + /// matching prefix with the cache. + /// + /// Returns `Some((bundle, receipts, requests, gas_used, blob_gas_used, skip_count))` + /// if there's a non-empty matching prefix, and the full cache matches the incoming + /// prefix, where: + /// - `bundle` is the cumulative state after the matching prefix + /// - `receipts` is the receipts for the matching prefix + /// - `skip_count` is the number of transactions to skip + /// + /// Returns `None` if: + /// - The cache is empty + /// - No prefix matches (first transaction differs) + /// - Block number doesn't match + pub(crate) fn get_resumable_state( + &self, + block_number: u64, + tx_hashes: &[B256], + ) -> Option> { + if !self.is_valid_for_block(block_number) || self.is_empty() { + return None; + } + + let prefix_len = self.matching_prefix_len(tx_hashes); + if prefix_len == 0 { + return None; + } + + // Only return state if the full cache matches (partial prefix would need + // intermediate state snapshots, which we don't currently store). + // Partial match means incoming txs diverge from cache, need to re-execute. + (prefix_len == self.executed_tx_hashes.len()).then_some(( + &self.cumulative_bundle, + self.receipts.as_slice(), + &self.execution_meta.requests, + self.execution_meta.gas_used, + self.execution_meta.blob_gas_used, + prefix_len, + )) + } + + /// Returns cached state for resuming execution if the incoming transactions have a + /// matching prefix with the cache and the parent hash matches. + /// + /// Returns `Some((bundle, receipts, requests, gas_used, blob_gas_used, skip_count))` + /// if there's a non-empty matching prefix, where the full cache matches the incoming prefix, and the + /// `(block_number, parent_hash)` tuple matches the cached scope. + pub(crate) fn get_resumable_state_for_parent( + &self, + block_number: u64, + parent_hash: B256, + tx_hashes: &[B256], + ) -> Option> { + if !self.is_valid_for_block_parent(block_number, parent_hash) || self.is_empty() { + return None; + } + + let prefix_len = self.matching_prefix_len(tx_hashes); + if prefix_len == 0 { + return None; + } + + (prefix_len == self.executed_tx_hashes.len()).then_some(( + &self.cumulative_bundle, + self.receipts.as_slice(), + &self.execution_meta.requests, + self.execution_meta.gas_used, + self.execution_meta.blob_gas_used, + prefix_len, + )) + } + + /// Updates the cache with new execution results. + /// + /// This should be called after executing a flashblock. The provided bundle + /// and receipts should represent the cumulative state after all transactions. + pub fn update( + &mut self, + block_number: u64, + tx_hashes: Vec, + bundle: BundleState, + receipts: Vec, + ) { + self.update_with_execution_meta( + block_number, + tx_hashes, + bundle, + receipts, + CachedExecutionMeta::default(), + ); + } + + /// Updates the cache with new execution results and block-level metadata. + pub(crate) fn update_with_execution_meta( + &mut self, + block_number: u64, + tx_hashes: Vec, + bundle: BundleState, + receipts: Vec, + execution_meta: CachedExecutionMeta, + ) { + self.block_number = block_number; + self.cached_parent_hash = None; + self.executed_tx_hashes = tx_hashes; + self.cumulative_bundle = bundle; + self.receipts = receipts; + self.execution_meta = execution_meta; + } + + /// Updates the cache with new execution results and block-level metadata, scoped to the + /// provided parent hash. + pub(crate) fn update_with_execution_meta_for_parent( + &mut self, + block_number: u64, + parent_hash: B256, + tx_hashes: Vec, + bundle: BundleState, + receipts: Vec, + execution_meta: CachedExecutionMeta, + ) { + self.block_number = block_number; + self.cached_parent_hash = Some(parent_hash); + self.executed_tx_hashes = tx_hashes; + self.cumulative_bundle = bundle; + self.receipts = receipts; + self.execution_meta = execution_meta; + } +} + +#[cfg(test)] +mod tests { + use super::*; + use reth_optimism_primitives::OpPrimitives; + + type TestCache = TransactionCache; + + #[test] + fn test_cache_block_validation() { + let mut cache = TestCache::for_block(100); + assert!(cache.is_valid_for_block(100)); + assert!(!cache.is_valid_for_block(101)); + assert!(!cache.is_valid_for_block_parent(100, B256::repeat_byte(0x11))); + + // Update for same block doesn't clear + assert!(!cache.update_for_block(100)); + + // Update for different block clears + assert!(cache.update_for_block(101)); + assert!(cache.is_valid_for_block(101)); + assert!(cache.parent_hash().is_none()); + } + + #[test] + fn test_cache_clear() { + let mut cache = TestCache::for_block(100); + assert_eq!(cache.block_number(), 100); + + cache.clear(); + assert_eq!(cache.block_number(), 0); + assert!(cache.is_empty()); + } + + #[test] + fn test_matching_prefix_len() { + let mut cache = TestCache::for_block(100); + + let tx_a = B256::repeat_byte(0xAA); + let tx_b = B256::repeat_byte(0xBB); + let tx_c = B256::repeat_byte(0xCC); + let tx_d = B256::repeat_byte(0xDD); + + // Update cache with [A, B] + cache.update(100, vec![tx_a, tx_b], BundleState::default(), vec![]); + + // Full match + assert_eq!(cache.matching_prefix_len(&[tx_a, tx_b]), 2); + + // Continuation + assert_eq!(cache.matching_prefix_len(&[tx_a, tx_b, tx_c]), 2); + + // Partial match (reorg at position 1) + assert_eq!(cache.matching_prefix_len(&[tx_a, tx_d, tx_c]), 1); + + // No match (reorg at position 0) + assert_eq!(cache.matching_prefix_len(&[tx_d, tx_b, tx_c]), 0); + + // Empty incoming + assert_eq!(cache.matching_prefix_len(&[]), 0); + } + + #[test] + fn test_get_resumable_state() { + let mut cache = TestCache::for_block(100); + + let tx_a = B256::repeat_byte(0xAA); + let tx_b = B256::repeat_byte(0xBB); + let tx_c = B256::repeat_byte(0xCC); + + // Empty cache returns None + assert!(cache.get_resumable_state(100, &[tx_a, tx_b]).is_none()); + + // Update cache with [A, B] + cache.update(100, vec![tx_a, tx_b], BundleState::default(), vec![]); + + // Wrong block number returns None + assert!(cache.get_resumable_state(101, &[tx_a, tx_b]).is_none()); + + // Exact match returns state + let result = cache.get_resumable_state(100, &[tx_a, tx_b]); + assert!(result.is_some()); + let (_, _, _, _, _, skip) = result.unwrap(); + assert_eq!(skip, 2); + + // Continuation returns state (can skip cached txs) + let result = cache.get_resumable_state(100, &[tx_a, tx_b, tx_c]); + assert!(result.is_some()); + let (_, _, _, _, _, skip) = result.unwrap(); + assert_eq!(skip, 2); + + // Partial match (reorg) returns None - can't use partial cache + assert!(cache.get_resumable_state(100, &[tx_a, tx_c]).is_none()); + } + + // ==================== E2E Cache Reuse Scenario Tests ==================== + + /// Tests the complete E2E cache scenario: fb0 [A,B] → fb1 [A,B,C] + /// Verifies that cached bundle can be used as prestate for the continuation. + #[test] + fn test_e2e_cache_reuse_continuation_scenario() { + let mut cache = TestCache::new(); + + let tx_a = B256::repeat_byte(0xAA); + let tx_b = B256::repeat_byte(0xBB); + let tx_c = B256::repeat_byte(0xCC); + + // Simulate fb0: execute [A, B] from scratch + let fb0_txs = vec![tx_a, tx_b]; + assert!(cache.get_resumable_state(100, &fb0_txs).is_none()); + + // After fb0 execution, update cache + cache.update(100, fb0_txs, BundleState::default(), vec![]); + assert_eq!(cache.len(), 2); + + // Simulate fb1: [A, B, C] - should resume from cached state + let fb1_txs = vec![tx_a, tx_b, tx_c]; + let result = cache.get_resumable_state(100, &fb1_txs); + assert!(result.is_some()); + let (bundle, receipts, _, _, _, skip) = result.unwrap(); + + // skip=2 indicates 2 txs are covered by cached state (for logging) + // Note: All transactions are still executed, skip is informational only + assert_eq!(skip, 2); + // Bundle is used as prestate to warm the State builder + assert!(bundle.state.is_empty()); // Default bundle is empty in test + assert!(receipts.is_empty()); // No receipts in this test + + // After fb1 execution, update cache with full list + cache.update(100, fb1_txs, BundleState::default(), vec![]); + assert_eq!(cache.len(), 3); + } + + /// Tests reorg scenario: fb0 [A, B] → fb1 [A, D, E] + /// Verifies that divergent tx list invalidates cache. + #[test] + fn test_e2e_cache_reorg_scenario() { + let mut cache = TestCache::new(); + + let tx_a = B256::repeat_byte(0xAA); + let tx_b = B256::repeat_byte(0xBB); + let tx_d = B256::repeat_byte(0xDD); + let tx_e = B256::repeat_byte(0xEE); + + // fb0: execute [A, B] + cache.update(100, vec![tx_a, tx_b], BundleState::default(), vec![]); + + // fb1 (reorg): [A, D, E] - tx[1] diverges, cannot resume + let fb1_txs = vec![tx_a, tx_d, tx_e]; + let result = cache.get_resumable_state(100, &fb1_txs); + assert!(result.is_none()); // Partial match means we can't use cache + } + + /// Tests multi-flashblock progression within same block: + /// fb0 [A] → fb1 [A,B] → fb2 [A,B,C] + /// + /// Each flashblock can use the previous bundle as prestate for warm state reads. + /// Note: All transactions are still executed; skip count is for logging only. + #[test] + fn test_e2e_multi_flashblock_progression() { + let mut cache = TestCache::new(); + + let tx_a = B256::repeat_byte(0xAA); + let tx_b = B256::repeat_byte(0xBB); + let tx_c = B256::repeat_byte(0xCC); + + // fb0: [A] + cache.update(100, vec![tx_a], BundleState::default(), vec![]); + assert_eq!(cache.len(), 1); + + // fb1: [A, B] - cached state covers [A] (skip=1 for logging) + let fb1_txs = vec![tx_a, tx_b]; + let result = cache.get_resumable_state(100, &fb1_txs); + assert!(result.is_some()); + assert_eq!(result.unwrap().4, 1); // 1 tx covered by cache + + cache.update(100, fb1_txs, BundleState::default(), vec![]); + assert_eq!(cache.len(), 2); + + // fb2: [A, B, C] - cached state covers [A, B] (skip=2 for logging) + let fb2_txs = vec![tx_a, tx_b, tx_c]; + let result = cache.get_resumable_state(100, &fb2_txs); + assert!(result.is_some()); + assert_eq!(result.unwrap().5, 2); // 2 txs covered by cache + + cache.update(100, fb2_txs, BundleState::default(), vec![]); + assert_eq!(cache.len(), 3); + } + + /// Tests that cache is invalidated on block number change. + #[test] + fn test_e2e_block_transition_clears_cache() { + let mut cache = TestCache::new(); + + let tx_a = B256::repeat_byte(0xAA); + let tx_b = B256::repeat_byte(0xBB); + + // Block 100: cache [A, B] + cache.update(100, vec![tx_a, tx_b], BundleState::default(), vec![]); + assert_eq!(cache.len(), 2); + + // Block 101: same txs shouldn't resume (different block) + let result = cache.get_resumable_state(101, &[tx_a, tx_b]); + assert!(result.is_none()); + + // Explicit block update clears cache + cache.update_for_block(101); + assert!(cache.is_empty()); + } + + /// Tests cache behavior with empty transaction list. + #[test] + fn test_cache_empty_transactions() { + let mut cache = TestCache::new(); + + // Empty flashblock (only system tx, no user txs) + cache.update(100, vec![], BundleState::default(), vec![]); + assert!(cache.is_empty()); + + // Can't resume from empty cache + let tx_a = B256::repeat_byte(0xAA); + assert!(cache.get_resumable_state(100, &[tx_a]).is_none()); + } + + /// Documents the semantics of `skip_count`. + /// + /// A resumable state is only returned when the incoming transaction list fully extends the + /// cached list. In that case, `skip_count` is the number of prefix transactions covered by + /// cached execution output. + #[test] + fn test_skip_count_matches_cached_prefix_len() { + let mut cache = TestCache::new(); + + let tx_a = B256::repeat_byte(0xAA); + let tx_b = B256::repeat_byte(0xBB); + let tx_c = B256::repeat_byte(0xCC); + + // Cache state after executing [A, B] + cache.update(100, vec![tx_a, tx_b], BundleState::default(), vec![]); + + // get_resumable_state returns skip=2 for prefix [A, B] + let result = cache.get_resumable_state(100, &[tx_a, tx_b, tx_c]); + assert!(result.is_some()); + let (bundle, _receipts, _, _, _, skip_count) = result.unwrap(); + + // skip_count indicates cached prefix length + assert_eq!(skip_count, 2); + + // The bundle is the important part - used as resumable prestate. + assert!(bundle.state.is_empty()); // Default in test, real one has state + } + + /// Tests that receipts are properly cached and returned. + #[test] + fn test_cache_preserves_receipts() { + use op_alloy_consensus::OpReceipt; + use reth_optimism_primitives::OpPrimitives; + + let mut cache: TransactionCache = TransactionCache::new(); + + let tx_a = B256::repeat_byte(0xAA); + let tx_b = B256::repeat_byte(0xBB); + + // Create mock receipts + let receipt_a = OpReceipt::Legacy(alloy_consensus::Receipt { + status: alloy_consensus::Eip658Value::Eip658(true), + cumulative_gas_used: 21000, + logs: vec![], + }); + let receipt_b = OpReceipt::Legacy(alloy_consensus::Receipt { + status: alloy_consensus::Eip658Value::Eip658(true), + cumulative_gas_used: 42000, + logs: vec![], + }); + + cache.update(100, vec![tx_a, tx_b], BundleState::default(), vec![receipt_a, receipt_b]); + + // Verify receipts are preserved + assert_eq!(cache.receipts().len(), 2); + + // On resumable state, receipts are returned + let tx_c = B256::repeat_byte(0xCC); + let result = cache.get_resumable_state(100, &[tx_a, tx_b, tx_c]); + assert!(result.is_some()); + let (_, receipts, _, _, _, _) = result.unwrap(); + assert_eq!(receipts.len(), 2); + } + + #[test] + fn test_cache_preserves_execution_meta() { + let mut cache = TestCache::new(); + + let tx_a = B256::repeat_byte(0xAA); + let tx_b = B256::repeat_byte(0xBB); + let tx_c = B256::repeat_byte(0xCC); + + let mut requests = Requests::default(); + requests.push_request_with_type(0x01, [0xAA, 0xBB]); + + cache.update_with_execution_meta( + 100, + vec![tx_a, tx_b], + BundleState::default(), + vec![], + CachedExecutionMeta { + requests: requests.clone(), + gas_used: 42_000, + blob_gas_used: 123, + }, + ); + + let resumable = cache.get_resumable_state(100, &[tx_a, tx_b, tx_c]); + assert!(resumable.is_some()); + let (_, _, cached_requests, gas_used, blob_gas_used, skip_count) = resumable.unwrap(); + assert_eq!(skip_count, 2); + assert_eq!(gas_used, 42_000); + assert_eq!(blob_gas_used, 123); + assert_eq!(cached_requests, &requests); + } + + #[test] + fn test_cache_parent_scoping() { + let mut cache = TestCache::new(); + + let tx_a = B256::repeat_byte(0xAA); + let tx_b = B256::repeat_byte(0xBB); + let tx_c = B256::repeat_byte(0xCC); + let parent_a = B256::repeat_byte(0x11); + let parent_b = B256::repeat_byte(0x22); + + cache.update_with_execution_meta_for_parent( + 100, + parent_a, + vec![tx_a, tx_b], + BundleState::default(), + vec![], + CachedExecutionMeta { + requests: Requests::default(), + gas_used: 42_000, + blob_gas_used: 0, + }, + ); + + // Matching block + parent should hit. + let hit = cache.get_resumable_state_for_parent(100, parent_a, &[tx_a, tx_b, tx_c]); + assert!(hit.is_some()); + + // Same block but different parent should miss. + let miss = cache.get_resumable_state_for_parent(100, parent_b, &[tx_a, tx_b, tx_c]); + assert!(miss.is_none()); + } +} diff --git a/crates/flashblocks/src/execution/mod.rs b/crates/flashblocks/src/execution/mod.rs index 70904c40..751b3fd3 100644 --- a/crates/flashblocks/src/execution/mod.rs +++ b/crates/flashblocks/src/execution/mod.rs @@ -1,29 +1,5 @@ -pub(crate) mod worker; - -use op_alloy_rpc_types_engine::OpFlashblockPayloadBase; -use reth_optimism_primitives::OpReceipt; - -pub(crate) struct BuildArgs { - pub(crate) base: OpFlashblockPayloadBase, - pub(crate) transactions: I, - pub(crate) last_flashblock_index: u64, -} +mod cache; +use cache::{CachedExecutionMeta, TransactionCache}; -/// Receipt requirements for cache-resume flow. -pub(crate) trait FlashblockCachedReceipt: Clone { - /// Adds `gas_offset` to each receipt's `cumulative_gas_used`. - fn add_cumulative_gas_offset(receipts: &mut [Self], gas_offset: u64); -} - -impl FlashblockCachedReceipt for OpReceipt { - fn add_cumulative_gas_offset(receipts: &mut [Self], gas_offset: u64) { - if gas_offset == 0 { - return; - } - - for receipt in receipts { - let inner = receipt.as_receipt_mut(); - inner.cumulative_gas_used = inner.cumulative_gas_used.saturating_add(gas_offset); - } - } -} +pub(crate) mod worker; +pub use worker::{BuildArgs, BuildResult, FlashblockCachedReceipt}; diff --git a/crates/flashblocks/src/execution/worker.rs b/crates/flashblocks/src/execution/worker.rs index 973bc419..4d0f9154 100644 --- a/crates/flashblocks/src/execution/worker.rs +++ b/crates/flashblocks/src/execution/worker.rs @@ -1,11 +1,12 @@ use crate::{ cache::{FlashblockStateCache, PendingSequence}, - BuildArgs, FlashblockCachedReceipt, + execution::{CachedExecutionMeta, TransactionCache}, }; use std::{ sync::Arc, time::{Duration, Instant}, }; +use tokio_util::sync::CancellationToken; use tracing::trace; use alloy_eips::{eip2718::WithEncoded, BlockNumberOrTag}; @@ -21,11 +22,13 @@ use reth_evm::{ ConfigureEvm, Evm, }; use reth_execution_types::{BlockExecutionOutput, BlockExecutionResult}; +use reth_optimism_primitives::OpReceipt; use reth_primitives_traits::{ transaction::TxHashRef, AlloyBlockHeader, BlockTy, HeaderTy, NodePrimitives, ReceiptTy, Recovered, RecoveredBlock, SealedHeader, }; use reth_revm::{ + cached::CachedReads, database::StateProviderDatabase, db::{states::bundle_state::BundleRetention, BundleState, State}, }; @@ -35,41 +38,81 @@ use reth_storage_api::{ StateRootProvider, }; +pub(crate) struct BuildArgs { + pub(crate) base: OpFlashblockPayloadBase, + pub(crate) transactions: I, + pub(crate) cached_state: Option<(B256, CachedReads)>, + pub(crate) last_flashblock_index: u64, + pub(crate) cancel: CancellationToken, +} + /// The `FlashblocksValidator` builds [`PendingBlock`] out of a sequence of transactions. /// /// Owns a [`TransactionCache`] for incremental prefix caching between flashblock builds. #[derive(Debug)] -pub(crate) struct FlashblockSequenceValidator -where - N::Receipt: FlashblockCachedReceipt, -{ +pub(crate) struct FlashblocksValidator { /// The EVM configuration used to build the flashblocks. evm_config: EvmConfig, - /// The canonical chainstate provider. - provider: Provider, - /// The flashblocks state cache containing the flashblocks state cache layer. + /// The transaction execution cache for incremental executions. + tx_cache: TransactionCache, + /// The state cache containing the canonical chainstate provider and the flashblocks /// state cache layer. - flashblocks_state: FlashblockStateCache, + state_cache: FlashblockStateCache, } -impl FlashblockSequenceValidator -where - N::Receipt: FlashblockCachedReceipt, -{ +impl FlashblocksValidator { pub(crate) fn new( evm_config: EvmConfig, - provider: Provider, - flashblocks_state: FlashblockStateCache, + state_cache: FlashblockStateCache, ) -> Self { - Self { evm_config, provider, flashblocks_state } + Self { evm_config, state_cache, tx_cache: TransactionCache::new() } } pub(crate) const fn provider(&self) -> &Provider { &self.provider } + + /// Clears the transaction cache (used on reorg/catch-up). + pub(crate) fn clear_cache(&mut self) { + self.tx_cache.clear(); + } } -impl FlashblockSequenceValidator +/// Cached prefix execution data used to resume canonical builds. +#[derive(Debug, Clone)] +struct CachedPrefixExecutionResult { + /// Number of leading transactions covered by cached execution. + cached_tx_count: usize, + /// Cumulative bundle state after executing the cached prefix. + bundle: BundleState, + /// Cached receipts for the prefix. + receipts: Vec, + /// Total gas used by the cached prefix. + gas_used: u64, + /// Total blob/DA gas used by the cached prefix. + blob_gas_used: u64, +} + +/// Receipt requirements for cache-resume flow. +pub trait FlashblockCachedReceipt: Clone { + /// Adds `gas_offset` to each receipt's `cumulative_gas_used`. + fn add_cumulative_gas_offset(receipts: &mut [Self], gas_offset: u64); +} + +impl FlashblockCachedReceipt for OpReceipt { + fn add_cumulative_gas_offset(receipts: &mut [Self], gas_offset: u64) { + if gas_offset == 0 { + return; + } + + for receipt in receipts { + let inner = receipt.as_receipt_mut(); + inner.cumulative_gas_used = inner.cumulative_gas_used.saturating_add(gas_offset); + } + } +} + +impl FlashBlockBuilder where N: NodePrimitives, N::Receipt: FlashblockCachedReceipt, @@ -97,7 +140,7 @@ where pub(crate) fn execute>>>( &mut self, mut args: BuildArgs, - ) -> eyre::Result<()> { + ) -> eyre::Result> { trace!(target: "flashblocks", "Attempting new pending block from flashblocks"); let parent_hash = args.base.parent_hash; @@ -434,202 +477,202 @@ fn is_consistent_speculative_parent_hashes( incoming_parent_hash == pending_block_hash && pending_block_hash == pending_sealed_hash } -// #[cfg(test)] -// mod tests { -// use super::{is_consistent_speculative_parent_hashes, BuildArgs, FlashBlockBuilder}; -// use crate::execution::cache::CachedExecutionMeta; -// use alloy_consensus::{SignableTransaction, TxEip1559}; -// use alloy_eips::eip2718::Encodable2718; -// use alloy_network::TxSignerSync; -// use alloy_primitives::{Address, StorageKey, StorageValue, TxKind, B256, U256}; -// use alloy_signer_local::PrivateKeySigner; -// use op_alloy_rpc_types_engine::OpFlashblockPayloadBase; -// use op_revm::constants::L1_BLOCK_CONTRACT; -// use reth_optimism_chainspec::OP_MAINNET; -// use reth_optimism_evm::OpEvmConfig; -// use reth_optimism_primitives::{OpBlock, OpPrimitives, OpTransactionSigned}; -// use reth_primitives_traits::{AlloyBlockHeader, Recovered, SignerRecoverable}; -// use reth_provider::test_utils::{ExtendedAccount, MockEthProvider}; -// use reth_provider::ChainSpecProvider; -// use reth_storage_api::BlockReaderIdExt; -// use std::str::FromStr; - -// fn signed_transfer_tx( -// signer: &PrivateKeySigner, -// nonce: u64, -// recipient: Address, -// ) -> OpTransactionSigned { -// let mut tx = TxEip1559 { -// chain_id: 10, // OP Mainnet chain id -// nonce, -// gas_limit: 100_000, -// max_priority_fee_per_gas: 1_000_000_000, -// max_fee_per_gas: 2_000_000_000, -// to: TxKind::Call(recipient), -// value: U256::from(1), -// ..Default::default() -// }; -// let signature = signer.sign_transaction_sync(&mut tx).expect("signing tx succeeds"); -// tx.into_signed(signature).into() -// } - -// fn into_encoded_recovered( -// tx: OpTransactionSigned, -// signer: Address, -// ) -> alloy_eips::eip2718::WithEncoded> { -// let encoded = tx.encoded_2718(); -// Recovered::new_unchecked(tx, signer).into_encoded_with(encoded) -// } - -// #[test] -// fn speculative_parent_hashes_must_all_match() { -// let h = B256::repeat_byte(0x11); -// assert!(is_consistent_speculative_parent_hashes(h, h, h)); -// } - -// #[test] -// fn speculative_parent_hashes_reject_any_mismatch() { -// let incoming = B256::repeat_byte(0x11); -// let pending = B256::repeat_byte(0x22); -// let sealed = B256::repeat_byte(0x33); - -// assert!(!is_consistent_speculative_parent_hashes(incoming, pending, sealed)); -// assert!(!is_consistent_speculative_parent_hashes(incoming, incoming, sealed)); -// assert!(!is_consistent_speculative_parent_hashes(incoming, pending, pending)); -// } - -// #[test] -// fn canonical_build_reuses_cached_prefix_execution() { -// let provider = MockEthProvider::::new().with_chain_spec(OP_MAINNET.clone()); -// let genesis_hash = provider.chain_spec().genesis_hash(); -// let genesis_block = -// OpBlock::new(provider.chain_spec().genesis_header().clone(), Default::default()); -// provider.add_block(genesis_hash, genesis_block); - -// let recipient = Address::repeat_byte(0x22); -// let signer = PrivateKeySigner::random(); -// let tx_a = signed_transfer_tx(&signer, 0, recipient); -// let tx_b = signed_transfer_tx(&signer, 1, recipient); -// let tx_c = signed_transfer_tx(&signer, 2, recipient); -// let signer = tx_a.recover_signer().expect("tx signer recovery succeeds"); - -// provider.add_account(signer, ExtendedAccount::new(0, U256::from(1_000_000_000_000_000u64))); -// provider.add_account(recipient, ExtendedAccount::new(0, U256::ZERO)); -// provider.add_account( -// L1_BLOCK_CONTRACT, -// ExtendedAccount::new(1, U256::ZERO).extend_storage([ -// (StorageKey::with_last_byte(1), StorageValue::from(1_000_000_000u64)), -// (StorageKey::with_last_byte(5), StorageValue::from(188u64)), -// (StorageKey::with_last_byte(6), StorageValue::from(684_000u64)), -// ( -// StorageKey::with_last_byte(3), -// StorageValue::from_str( -// "0x0000000000000000000000000000000000001db0000d27300000000000000005", -// ) -// .expect("valid L1 fee scalar storage value"), -// ), -// ]), -// ); - -// let latest = provider -// .latest_header() -// .expect("provider latest header query succeeds") -// .expect("genesis header exists"); - -// let base = OpFlashblockPayloadBase { -// parent_hash: latest.hash(), -// parent_beacon_block_root: B256::ZERO, -// fee_recipient: Address::ZERO, -// prev_randao: B256::repeat_byte(0x55), -// block_number: latest.number() + 1, -// gas_limit: 30_000_000, -// timestamp: latest.timestamp() + 2, -// extra_data: Default::default(), -// base_fee_per_gas: U256::from(1_000_000_000u64), -// }; -// let base_parent_hash = base.parent_hash; - -// let tx_a_hash = B256::from(*tx_a.tx_hash()); -// let tx_b_hash = B256::from(*tx_b.tx_hash()); -// let tx_c_hash = B256::from(*tx_c.tx_hash()); - -// let tx_a = into_encoded_recovered(tx_a, signer); -// let tx_b = into_encoded_recovered(tx_b, signer); -// let tx_c = into_encoded_recovered(tx_c, signer); - -// let evm_config = OpEvmConfig::optimism(OP_MAINNET.clone()); -// let mut builder = FlashBlockBuilder::::new(evm_config, provider); - -// let first = builder -// .execute(BuildArgs { -// base: base.clone(), -// transactions: vec![tx_a.clone(), tx_b.clone()], -// cached_state: None, -// last_flashblock_index: 0, -// last_flashblock_hash: B256::repeat_byte(0xA0), -// compute_state_root: false, -// pending_parent: None, -// }) -// .expect("first build succeeds") -// .expect("first build is canonical"); - -// assert_eq!(first.pending_state.execution_outcome.result.receipts.len(), 2); - -// let cached_hashes = vec![tx_a_hash, tx_b_hash]; -// let (bundle, receipts, requests, gas_used, blob_gas_used, skip) = builder -// .tx_cache -// .get_resumable_state_with_execution_meta_for_parent( -// base.block_number, -// base_parent_hash, -// &cached_hashes, -// ) -// .expect("cache should contain first build execution state"); -// assert_eq!(skip, 2); - -// let mut tampered_receipts = receipts.to_vec(); -// tampered_receipts[0].as_receipt_mut().cumulative_gas_used = -// tampered_receipts[0].as_receipt().cumulative_gas_used.saturating_add(17); -// let expected_tampered_gas = tampered_receipts[0].as_receipt().cumulative_gas_used; - -// builder.tx_cache.update_with_execution_meta_for_parent( -// base.block_number, -// base_parent_hash, -// cached_hashes, -// bundle.clone(), -// tampered_receipts, -// CachedExecutionMeta { requests: requests.clone(), gas_used, blob_gas_used }, -// ); - -// let second_hashes = vec![tx_a_hash, tx_b_hash, tx_c_hash]; -// let (_, _, _, _, _, skip) = builder -// .tx_cache -// .get_resumable_state_with_execution_meta_for_parent( -// base.block_number, -// base_parent_hash, -// &second_hashes, -// ) -// .expect("second tx list should extend cached prefix"); -// assert_eq!(skip, 2); - -// let second = builder -// .execute(BuildArgs { -// base, -// transactions: vec![tx_a, tx_b, tx_c], -// cached_state: None, -// last_flashblock_index: 1, -// last_flashblock_hash: B256::repeat_byte(0xA1), -// compute_state_root: false, -// pending_parent: None, -// }) -// .expect("second build succeeds") -// .expect("second build is canonical"); - -// let receipts = &second.pending_state.execution_outcome.result.receipts; -// assert_eq!(receipts.len(), 3); -// assert_eq!(receipts[0].as_receipt().cumulative_gas_used, expected_tampered_gas); -// assert!( -// receipts[2].as_receipt().cumulative_gas_used -// > receipts[1].as_receipt().cumulative_gas_used -// ); -// } -// } +#[cfg(test)] +mod tests { + use super::{is_consistent_speculative_parent_hashes, BuildArgs, FlashBlockBuilder}; + use crate::execution::cache::CachedExecutionMeta; + use alloy_consensus::{SignableTransaction, TxEip1559}; + use alloy_eips::eip2718::Encodable2718; + use alloy_network::TxSignerSync; + use alloy_primitives::{Address, StorageKey, StorageValue, TxKind, B256, U256}; + use alloy_signer_local::PrivateKeySigner; + use op_alloy_rpc_types_engine::OpFlashblockPayloadBase; + use op_revm::constants::L1_BLOCK_CONTRACT; + use reth_optimism_chainspec::OP_MAINNET; + use reth_optimism_evm::OpEvmConfig; + use reth_optimism_primitives::{OpBlock, OpPrimitives, OpTransactionSigned}; + use reth_primitives_traits::{AlloyBlockHeader, Recovered, SignerRecoverable}; + use reth_provider::test_utils::{ExtendedAccount, MockEthProvider}; + use reth_provider::ChainSpecProvider; + use reth_storage_api::BlockReaderIdExt; + use std::str::FromStr; + + fn signed_transfer_tx( + signer: &PrivateKeySigner, + nonce: u64, + recipient: Address, + ) -> OpTransactionSigned { + let mut tx = TxEip1559 { + chain_id: 10, // OP Mainnet chain id + nonce, + gas_limit: 100_000, + max_priority_fee_per_gas: 1_000_000_000, + max_fee_per_gas: 2_000_000_000, + to: TxKind::Call(recipient), + value: U256::from(1), + ..Default::default() + }; + let signature = signer.sign_transaction_sync(&mut tx).expect("signing tx succeeds"); + tx.into_signed(signature).into() + } + + fn into_encoded_recovered( + tx: OpTransactionSigned, + signer: Address, + ) -> alloy_eips::eip2718::WithEncoded> { + let encoded = tx.encoded_2718(); + Recovered::new_unchecked(tx, signer).into_encoded_with(encoded) + } + + #[test] + fn speculative_parent_hashes_must_all_match() { + let h = B256::repeat_byte(0x11); + assert!(is_consistent_speculative_parent_hashes(h, h, h)); + } + + #[test] + fn speculative_parent_hashes_reject_any_mismatch() { + let incoming = B256::repeat_byte(0x11); + let pending = B256::repeat_byte(0x22); + let sealed = B256::repeat_byte(0x33); + + assert!(!is_consistent_speculative_parent_hashes(incoming, pending, sealed)); + assert!(!is_consistent_speculative_parent_hashes(incoming, incoming, sealed)); + assert!(!is_consistent_speculative_parent_hashes(incoming, pending, pending)); + } + + #[test] + fn canonical_build_reuses_cached_prefix_execution() { + let provider = MockEthProvider::::new().with_chain_spec(OP_MAINNET.clone()); + let genesis_hash = provider.chain_spec().genesis_hash(); + let genesis_block = + OpBlock::new(provider.chain_spec().genesis_header().clone(), Default::default()); + provider.add_block(genesis_hash, genesis_block); + + let recipient = Address::repeat_byte(0x22); + let signer = PrivateKeySigner::random(); + let tx_a = signed_transfer_tx(&signer, 0, recipient); + let tx_b = signed_transfer_tx(&signer, 1, recipient); + let tx_c = signed_transfer_tx(&signer, 2, recipient); + let signer = tx_a.recover_signer().expect("tx signer recovery succeeds"); + + provider.add_account(signer, ExtendedAccount::new(0, U256::from(1_000_000_000_000_000u64))); + provider.add_account(recipient, ExtendedAccount::new(0, U256::ZERO)); + provider.add_account( + L1_BLOCK_CONTRACT, + ExtendedAccount::new(1, U256::ZERO).extend_storage([ + (StorageKey::with_last_byte(1), StorageValue::from(1_000_000_000u64)), + (StorageKey::with_last_byte(5), StorageValue::from(188u64)), + (StorageKey::with_last_byte(6), StorageValue::from(684_000u64)), + ( + StorageKey::with_last_byte(3), + StorageValue::from_str( + "0x0000000000000000000000000000000000001db0000d27300000000000000005", + ) + .expect("valid L1 fee scalar storage value"), + ), + ]), + ); + + let latest = provider + .latest_header() + .expect("provider latest header query succeeds") + .expect("genesis header exists"); + + let base = OpFlashblockPayloadBase { + parent_hash: latest.hash(), + parent_beacon_block_root: B256::ZERO, + fee_recipient: Address::ZERO, + prev_randao: B256::repeat_byte(0x55), + block_number: latest.number() + 1, + gas_limit: 30_000_000, + timestamp: latest.timestamp() + 2, + extra_data: Default::default(), + base_fee_per_gas: U256::from(1_000_000_000u64), + }; + let base_parent_hash = base.parent_hash; + + let tx_a_hash = B256::from(*tx_a.tx_hash()); + let tx_b_hash = B256::from(*tx_b.tx_hash()); + let tx_c_hash = B256::from(*tx_c.tx_hash()); + + let tx_a = into_encoded_recovered(tx_a, signer); + let tx_b = into_encoded_recovered(tx_b, signer); + let tx_c = into_encoded_recovered(tx_c, signer); + + let evm_config = OpEvmConfig::optimism(OP_MAINNET.clone()); + let mut builder = FlashBlockBuilder::::new(evm_config, provider); + + let first = builder + .execute(BuildArgs { + base: base.clone(), + transactions: vec![tx_a.clone(), tx_b.clone()], + cached_state: None, + last_flashblock_index: 0, + last_flashblock_hash: B256::repeat_byte(0xA0), + compute_state_root: false, + pending_parent: None, + }) + .expect("first build succeeds") + .expect("first build is canonical"); + + assert_eq!(first.pending_state.execution_outcome.result.receipts.len(), 2); + + let cached_hashes = vec![tx_a_hash, tx_b_hash]; + let (bundle, receipts, requests, gas_used, blob_gas_used, skip) = builder + .tx_cache + .get_resumable_state_with_execution_meta_for_parent( + base.block_number, + base_parent_hash, + &cached_hashes, + ) + .expect("cache should contain first build execution state"); + assert_eq!(skip, 2); + + let mut tampered_receipts = receipts.to_vec(); + tampered_receipts[0].as_receipt_mut().cumulative_gas_used = + tampered_receipts[0].as_receipt().cumulative_gas_used.saturating_add(17); + let expected_tampered_gas = tampered_receipts[0].as_receipt().cumulative_gas_used; + + builder.tx_cache.update_with_execution_meta_for_parent( + base.block_number, + base_parent_hash, + cached_hashes, + bundle.clone(), + tampered_receipts, + CachedExecutionMeta { requests: requests.clone(), gas_used, blob_gas_used }, + ); + + let second_hashes = vec![tx_a_hash, tx_b_hash, tx_c_hash]; + let (_, _, _, _, _, skip) = builder + .tx_cache + .get_resumable_state_with_execution_meta_for_parent( + base.block_number, + base_parent_hash, + &second_hashes, + ) + .expect("second tx list should extend cached prefix"); + assert_eq!(skip, 2); + + let second = builder + .execute(BuildArgs { + base, + transactions: vec![tx_a, tx_b, tx_c], + cached_state: None, + last_flashblock_index: 1, + last_flashblock_hash: B256::repeat_byte(0xA1), + compute_state_root: false, + pending_parent: None, + }) + .expect("second build succeeds") + .expect("second build is canonical"); + + let receipts = &second.pending_state.execution_outcome.result.receipts; + assert_eq!(receipts.len(), 3); + assert_eq!(receipts[0].as_receipt().cumulative_gas_used, expected_tampered_gas); + assert!( + receipts[2].as_receipt().cumulative_gas_used + > receipts[1].as_receipt().cumulative_gas_used + ); + } +} diff --git a/crates/flashblocks/src/lib.rs b/crates/flashblocks/src/lib.rs index 034656f7..e2189344 100644 --- a/crates/flashblocks/src/lib.rs +++ b/crates/flashblocks/src/lib.rs @@ -11,7 +11,7 @@ mod ws; mod test_utils; pub use cache::{CachedTxInfo, FlashblockStateCache, PendingSequence}; -pub(crate) use execution::{BuildArgs, FlashblockCachedReceipt}; +pub use execution::FlashblockCachedReceipt; pub use service::FlashblocksRpcService; pub use subscription::FlashblocksPubSub; pub use ws::WsFlashBlockStream; From 190237cc62f1041445117c4efe203ecaaa10c8c8 Mon Sep 17 00:00:00 2001 From: Niven Date: Thu, 19 Mar 2026 21:18:47 +0800 Subject: [PATCH 42/76] feat(flashblocks-rpc): Revamp worker, add state root strategies into flashblocks sequence validation (#205) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * refactor(flashblocks): merge processor into validator with parallel SR Consolidate `processor.rs` into `validator.rs`, removing the separate module. Add parallel and overlay-based state root computation via `reth-trie-parallel`, `reth-trie-db`, and `reth-engine-tree`. Introduce `OverlayProviderFactory` trait alias and `compute_state_root` dispatcher that tries parallel SR first and falls back to serial. Replace direct `BlockBuilder` usage with `PayloadProcessor`-based execution using a no-op `flashblock_tx_iterator` for pre-recovered transactions. 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude Sonnet 4.6 * fix(flashblocks): sequence validator with payload processor 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude Sonnet 4.6 * feat(flashblocks): add flashblock assembler 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude Sonnet 4.6 --------- Co-authored-by: Claude Sonnet 4.6 --- Cargo.lock | 31 +- Cargo.toml | 7 +- crates/flashblocks/Cargo.toml | 12 + crates/flashblocks/src/cache/confirm.rs | 56 +- crates/flashblocks/src/cache/mod.rs | 289 +++-- crates/flashblocks/src/cache/pending.rs | 43 +- crates/flashblocks/src/execution/assemble.rs | 118 ++ crates/flashblocks/src/execution/cache.rs | 675 ----------- crates/flashblocks/src/execution/mod.rs | 100 +- crates/flashblocks/src/execution/validator.rs | 1041 +++++++++++++++++ crates/flashblocks/src/execution/worker.rs | 678 ----------- crates/flashblocks/src/lib.rs | 1 - crates/flashblocks/src/subscription/rpc.rs | 20 +- crates/flashblocks/src/validation.rs | 599 ---------- crates/rpc/Cargo.toml | 1 + 15 files changed, 1513 insertions(+), 2158 deletions(-) create mode 100644 crates/flashblocks/src/execution/assemble.rs delete mode 100644 crates/flashblocks/src/execution/cache.rs create mode 100644 crates/flashblocks/src/execution/validator.rs delete mode 100644 crates/flashblocks/src/execution/worker.rs delete mode 100644 crates/flashblocks/src/validation.rs diff --git a/Cargo.lock b/Cargo.lock index 499fe18f..03af9c82 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -730,9 +730,9 @@ dependencies = [ [[package]] name = "alloy-serde" -version = "1.7.3" +version = "1.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2ce1e0dbf7720eee747700e300c99aac01b1a95bb93f493a01e78ee28bb1a37" +checksum = "9e6d631f8b975229361d8af7b2c749af31c73b3cf1352f90e144ddb06227105e" dependencies = [ "alloy-primitives", "arbitrary", @@ -8073,7 +8073,7 @@ dependencies = [ "reth-node-api", "reth-primitives-traits", "reth-tracing", - "ringbuffer 0.16.0", + "ringbuffer", "serde", "serde_json", "tokio", @@ -9554,7 +9554,7 @@ dependencies = [ "reth-rpc-eth-types", "reth-storage-api", "reth-tasks", - "ringbuffer 0.16.0", + "ringbuffer", "serde_json", "tokio", "tokio-tungstenite 0.28.0", @@ -11028,12 +11028,6 @@ dependencies = [ "windows-sys 0.52.0", ] -[[package]] -name = "ringbuffer" -version = "0.15.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3df6368f71f205ff9c33c076d170dd56ebf68e8161c733c0caa07a7a5509ed53" - [[package]] name = "ringbuffer" version = "0.16.0" @@ -14177,7 +14171,9 @@ name = "xlayer-flashblocks" version = "0.1.0" dependencies = [ "alloy-consensus", + "alloy-eip7928", "alloy-eips", + "alloy-evm", "alloy-json-rpc", "alloy-network", "alloy-primitives", @@ -14186,6 +14182,7 @@ dependencies = [ "alloy-signer-local", "async-trait", "brotli", + "crossbeam-channel", "derive_more", "eyre", "futures", @@ -14200,6 +14197,8 @@ dependencies = [ "reth-chain-state", "reth-chainspec", "reth-db-models", + "reth-engine-primitives", + "reth-engine-tree", "reth-errors", "reth-evm", "reth-execution-types", @@ -14207,8 +14206,10 @@ dependencies = [ "reth-node-api", "reth-node-core", "reth-optimism-chainspec", + "reth-optimism-consensus", "reth-optimism-evm", "reth-optimism-flashblocks", + "reth-optimism-forks", "reth-optimism-primitives", "reth-primitives-traits", "reth-provider", @@ -14221,7 +14222,11 @@ dependencies = [ "reth-storage-api", "reth-tasks", "reth-tracing", - "ringbuffer 0.15.0", + "reth-trie", + "reth-trie-common", + "reth-trie-db", + "reth-trie-parallel", + "ringbuffer", "serde", "serde_json", "test-case", @@ -14352,17 +14357,17 @@ dependencies = [ "alloy-eips", "alloy-primitives", "alloy-rpc-types-eth", - "async-trait", + "alloy-serde", "futures", "jsonrpsee", "jsonrpsee-types", "op-alloy-network", + "op-alloy-rpc-types", "reth-chain-state", "reth-optimism-primitives", "reth-optimism-rpc", "reth-primitives-traits", "reth-revm", - "reth-rpc", "reth-rpc-convert", "reth-rpc-eth-api", "reth-rpc-eth-types", diff --git a/Cargo.toml b/Cargo.toml index 08af2518..b71435a6 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -91,6 +91,7 @@ reth-db = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.0", defaul reth-db-api = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.0", default-features = false } reth-db-models = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.0", default-features = false } reth-engine-primitives = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.0", default-features = false } +reth-engine-tree = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.0" } reth-errors = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.0" } reth-ethereum-forks = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.0", default-features = false } reth-evm = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.0", default-features = false } @@ -129,6 +130,8 @@ reth-tracing-otlp = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11. reth-transaction-pool = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.0" } reth-trie = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.0" } reth-trie-common = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.0", default-features = false } +reth-trie-db = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.0" } +reth-trie-parallel = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.0" } # ============================================================================== # Reth Optimism Dependencies (from local optimism/rust op-reth) @@ -166,6 +169,7 @@ alloy-chains = { version = "0.2.30", default-features = false } alloy-contract = { version = "~1.6" } alloy-consensus = { version = "~1.6", default-features = false } alloy-eips = { version = "~1.6", default-features = false } +alloy-eip7928 = { version = "0.3.0", default-features = false } alloy-evm = { version = "~0.27", default-features = false } alloy-genesis = { version = "~1.6", default-features = false } alloy-json-rpc = { version = "~1.6" } @@ -222,6 +226,7 @@ jsonrpsee-core = { version = "0.26.0" } # misc clap = { version = "4.4.3" } +crossbeam-channel = "0.5.13" derive_more = { version = "2", default-features = false, features = ["full"] } dashmap = "6.1" either = { version = "1.15.0", default-features = false } @@ -242,7 +247,7 @@ tracing = { version = "0.1.41" } shellexpand = "3.1" url = "2.5" brotli = "8.0" -ringbuffer = "0.15" +ringbuffer = "=0.16.0" # p2p libp2p = { version = "0.56", features = ["identify", "ping", "noise", "tcp", "autonat", "mdns", "tokio", "cbor", "macros", "yamux", "dns"] } diff --git a/crates/flashblocks/Cargo.toml b/crates/flashblocks/Cargo.toml index 3c47a756..8dd2e149 100644 --- a/crates/flashblocks/Cargo.toml +++ b/crates/flashblocks/Cargo.toml @@ -23,6 +23,8 @@ reth-execution-types = { workspace = true, features = ["serde"] } reth-metrics.workspace = true reth-node-api.workspace = true reth-node-core.workspace = true +reth-optimism-consensus.workspace = true +reth-optimism-forks.workspace = true reth-optimism-primitives = { workspace = true, features = ["serde"] } reth-primitives-traits = { workspace = true, features = ["serde"] } reth-revm.workspace = true @@ -33,12 +35,21 @@ reth-rpc-eth-types.workspace = true reth-rpc-server-types.workspace = true reth-optimism-flashblocks.workspace = true reth-storage-api.workspace = true +reth-trie-common.workspace = true +reth-trie.workspace = true +reth-trie-db.workspace = true +reth-trie-parallel.workspace = true reth-tasks = { workspace = true, features = ["rayon"] } reth-tracing.workspace = true +reth-engine-primitives.workspace = true +reth-engine-tree.workspace = true +reth-provider.workspace = true # alloy alloy-consensus.workspace = true alloy-eips = { workspace = true, features = ["serde"] } +alloy-eip7928.workspace = true +alloy-evm.workspace = true alloy-json-rpc.workspace = true alloy-primitives = { workspace = true, features = ["serde"] } alloy-rpc-types-engine = { workspace = true, features = ["serde"] } @@ -63,6 +74,7 @@ async-trait.workspace = true # misc brotli = { workspace = true, features = ["std"] } +crossbeam-channel.workspace = true derive_more.workspace = true eyre.workspace = true parking_lot.workspace = true diff --git a/crates/flashblocks/src/cache/confirm.rs b/crates/flashblocks/src/cache/confirm.rs index ddbd1883..f99a1f83 100644 --- a/crates/flashblocks/src/cache/confirm.rs +++ b/crates/flashblocks/src/cache/confirm.rs @@ -16,17 +16,17 @@ const DEFAULT_CONFIRM_BLOCK_CACHE_SIZE: usize = 1_000; const DEFAULT_TX_CACHE_SIZE: usize = DEFAULT_CONFIRM_BLOCK_CACHE_SIZE * 10_000; #[derive(Debug)] -pub struct ConfirmedBlock { +pub(crate) struct ConfirmedBlock { /// The locally built pending block with execution output. - pub executed_block: ExecutedBlock, + pub(crate) executed_block: ExecutedBlock, /// The receipts for the pending block - pub receipts: Arc>>, + pub(crate) receipts: Arc>>, } impl ConfirmedBlock { /// Returns a pair of [`RecoveredBlock`] and a vector of [`NodePrimitives::Receipt`]s by /// cloning from borrowed self. - pub fn to_block_and_receipts(&self) -> BlockAndReceipts { + pub(crate) fn to_block_and_receipts(&self) -> BlockAndReceipts { BlockAndReceipts { block: self.executed_block.recovered_block.clone(), receipts: self.receipts.clone(), @@ -45,7 +45,7 @@ impl ConfirmedBlock { /// Transaction data is stored in a `HashMap` which indexes transaction hashes to /// [`CachedTxInfo`] for O(1) tx/receipt lookups. #[derive(Debug)] -pub struct ConfirmCache { +pub(crate) struct ConfirmCache { /// Primary storage: block number → (block hash, block + receipts). /// `BTreeMap` ordering enables efficient range-based flush via `split_off`. blocks: BTreeMap)>, @@ -63,7 +63,7 @@ impl Default for ConfirmCache { impl ConfirmCache { /// Creates a new [`ConfirmCache`]. - pub fn new() -> Self { + pub(crate) fn new() -> Self { Self { blocks: BTreeMap::new(), hash_to_number: HashMap::with_capacity(DEFAULT_CONFIRM_BLOCK_CACHE_SIZE), @@ -72,17 +72,17 @@ impl ConfirmCache { } /// Returns the number of cached entries. - pub fn len(&self) -> usize { + pub(crate) fn len(&self) -> usize { self.blocks.len() } /// Returns `true` if the cache is empty. - pub fn is_empty(&self) -> bool { + pub(crate) fn is_empty(&self) -> bool { self.blocks.is_empty() } /// Inserts a confirmed block into the cache, indexed by block number and block hash. - pub fn insert( + pub(crate) fn insert( &mut self, height: u64, executed_block: ExecutedBlock, @@ -118,34 +118,37 @@ impl ConfirmCache { } /// Clears all entries. - pub fn clear(&mut self) { + pub(crate) fn clear(&mut self) { self.tx_index.clear(); self.blocks.clear(); self.hash_to_number.clear(); } /// Returns the block number for the given block hash, if cached. - pub fn number_for_hash(&self, block_hash: &B256) -> Option { + pub(crate) fn number_for_hash(&self, block_hash: &B256) -> Option { self.hash_to_number.get(block_hash).copied() } /// Returns the block hash for the given block number, if cached. - pub fn hash_for_number(&self, block_number: u64) -> Option { + pub(crate) fn hash_for_number(&self, block_number: u64) -> Option { self.blocks.get(&block_number).map(|(hash, _)| *hash) } /// Returns the confirmed block for the given block hash, if present. - pub fn get_block_by_hash(&self, block_hash: &B256) -> Option> { + pub(crate) fn get_block_by_hash(&self, block_hash: &B256) -> Option> { self.get_block_by_number(self.number_for_hash(block_hash)?) } /// Returns the confirmed block for the given block number, if present. - pub fn get_block_by_number(&self, block_number: u64) -> Option> { + pub(crate) fn get_block_by_number(&self, block_number: u64) -> Option> { self.blocks.get(&block_number).map(|(_, entry)| entry.to_block_and_receipts()) } /// Returns the cached transaction info for the given tx hash, if present. - pub fn get_tx_info(&self, tx_hash: &TxHash) -> Option<(CachedTxInfo, BlockAndReceipts)> { + pub(crate) fn get_tx_info( + &self, + tx_hash: &TxHash, + ) -> Option<(CachedTxInfo, BlockAndReceipts)> { let tx_info = self.tx_index.get(tx_hash).cloned()?; let block = self.get_block_by_number(tx_info.block_number)?; Some((tx_info, block)) @@ -155,7 +158,7 @@ impl ConfirmCache { /// ordered newest to oldest (for use with `MemoryOverlayStateProvider`). /// /// Returns an error if state cache pollution detected (non-contiguous blocks). - pub fn get_executed_blocks_up_to_height( + pub(crate) fn get_executed_blocks_up_to_height( &self, target_height: u64, canon_height: u64, @@ -190,7 +193,10 @@ impl ConfirmCache { } /// Removes and returns the confirmed block for the given block number. - pub fn remove_block_by_number(&mut self, block_number: u64) -> Option> { + pub(crate) fn remove_block_by_number( + &mut self, + block_number: u64, + ) -> Option> { let (hash, block) = self.blocks.remove(&block_number)?; self.hash_to_number.remove(&hash); self.remove_tx_index_for_block(&block); @@ -198,7 +204,7 @@ impl ConfirmCache { } /// Removes and returns the confirmed block for the given block hash. - pub fn remove_block_by_hash(&mut self, block_hash: &B256) -> Option> { + pub(crate) fn remove_block_by_hash(&mut self, block_hash: &B256) -> Option> { let number = self.hash_to_number.remove(block_hash)?; let (_, block) = self.blocks.remove(&number)?; self.remove_tx_index_for_block(&block); @@ -216,7 +222,7 @@ impl ConfirmCache { /// /// Called when the canonical chain catches up to the confirmed cache. Returns /// the number of entries flushed. - pub fn flush_up_to_height(&mut self, canon_height: u64) -> usize { + pub(crate) fn flush_up_to_height(&mut self, canon_height: u64) -> usize { let retained = self.blocks.split_off(&(canon_height + 1)); let stale = std::mem::replace(&mut self.blocks, retained); let count = stale.len(); @@ -520,7 +526,7 @@ mod tests { let (block, receipts) = make_executed_block_with_txs(1, B256::ZERO, 0, 3); let block_hash = block.recovered_block.hash(); let tx_hashes: Vec<_> = - block.recovered_block.body().transactions().map(|tx| *tx.tx_hash()).collect(); + block.recovered_block.body().transactions().map(|tx| tx.tx_hash()).collect(); cache.insert(1, block, receipts).expect("insert"); for (i, tx_hash) in tx_hashes.iter().enumerate() { @@ -537,7 +543,7 @@ mod tests { let mut cache = ConfirmCache::::new(); let (block, receipts) = make_executed_block_with_txs(1, B256::ZERO, 0, 2); let tx_hashes: Vec<_> = - block.recovered_block.body().transactions().map(|tx| *tx.tx_hash()).collect(); + block.recovered_block.body().transactions().map(|tx| tx.tx_hash()).collect(); cache.insert(1, block, receipts).expect("insert"); cache.flush_up_to_height(1); @@ -551,7 +557,7 @@ mod tests { let mut cache = ConfirmCache::::new(); let (block, receipts) = make_executed_block_with_txs(5, B256::ZERO, 0, 2); let tx_hashes: Vec<_> = - block.recovered_block.body().transactions().map(|tx| *tx.tx_hash()).collect(); + block.recovered_block.body().transactions().map(|tx| tx.tx_hash()).collect(); cache.insert(5, block, receipts).expect("insert"); cache.remove_block_by_number(5); @@ -586,13 +592,13 @@ mod tests { let mut cache = ConfirmCache::::new(); let (block1, receipts1) = make_executed_block_with_txs(1, B256::ZERO, 0, 2); let tx_hashes_1: Vec<_> = - block1.recovered_block.body().transactions().map(|tx| *tx.tx_hash()).collect(); + block1.recovered_block.body().transactions().map(|tx| tx.tx_hash()).collect(); let parent = block1.recovered_block.hash(); cache.insert(1, block1, receipts1).expect("insert 1"); let (block2, receipts2) = make_executed_block_with_txs(2, parent, 100, 2); let tx_hashes_2: Vec<_> = - block2.recovered_block.body().transactions().map(|tx| *tx.tx_hash()).collect(); + block2.recovered_block.body().transactions().map(|tx| tx.tx_hash()).collect(); cache.insert(2, block2, receipts2).expect("insert 2"); cache.flush_up_to_height(1); @@ -609,7 +615,7 @@ mod tests { let mut cache = ConfirmCache::::new(); let (block, receipts) = make_executed_block_with_txs(1, B256::ZERO, 0, 2); let tx_hashes: Vec<_> = - block.recovered_block.body().transactions().map(|tx| *tx.tx_hash()).collect(); + block.recovered_block.body().transactions().map(|tx| tx.tx_hash()).collect(); cache.insert(1, block, receipts).expect("insert"); cache.clear(); diff --git a/crates/flashblocks/src/cache/mod.rs b/crates/flashblocks/src/cache/mod.rs index 17b99eb7..c033ad9f 100644 --- a/crates/flashblocks/src/cache/mod.rs +++ b/crates/flashblocks/src/cache/mod.rs @@ -3,11 +3,12 @@ pub mod pending; pub(crate) mod raw; pub(crate) mod utils; -pub use confirm::ConfirmCache; +pub(crate) use confirm::ConfirmCache; +pub(crate) use raw::RawFlashblocksCache; + pub use pending::PendingSequence; -pub use raw::RawFlashblocksCache; -use crate::{FlashblockCachedReceipt, PendingSequenceRx}; +use crate::PendingSequenceRx; use parking_lot::RwLock; use std::sync::Arc; use tokio::sync::watch; @@ -20,6 +21,13 @@ use reth_chain_state::{ExecutedBlock, MemoryOverlayStateProvider}; use reth_primitives_traits::{NodePrimitives, ReceiptTy, SealedHeaderFor}; use reth_rpc_eth_types::block::BlockAndReceipts; use reth_storage_api::StateProviderBox; +use reth_trie_db::ChangesetCache; + +/// The minimum number of blocks to retain in the changeset cache after eviction. +/// +/// This ensures that recent trie changesets are kept in memory for potential reorgs, +/// even when the finalized block is not set (e.g., on L2s like Optimism). +const CHANGESET_CACHE_RETENTION_BLOCKS: u64 = 64; /// Cached transaction info (block context, receipt and tx data) for O(1) lookups /// by transaction hash. @@ -53,37 +61,42 @@ pub struct CachedTxInfo { /// state, ensuring atomic operations across pending, confirmed, and height /// state (e.g. reorg detection + flush + insert in `handle_confirmed_block`). #[derive(Debug, Clone)] -pub struct FlashblockStateCache -where - N::Receipt: FlashblockCachedReceipt, -{ +pub struct FlashblockStateCache { inner: Arc>>, + changeset_cache: ChangesetCache, } // FlashblockStateCache read interfaces -impl FlashblockStateCache -where - N::Receipt: FlashblockCachedReceipt, -{ +impl FlashblockStateCache { /// Creates a new [`FlashblockStateCache`]. pub fn new() -> Self { - Self { inner: Arc::new(RwLock::new(FlashblockStateCacheInner::new())) } + Self { + inner: Arc::new(RwLock::new(FlashblockStateCacheInner::new())), + changeset_cache: ChangesetCache::new(), + } } } // FlashblockStateCache read height interfaces -impl FlashblockStateCache -where - N::Receipt: FlashblockCachedReceipt, -{ +impl FlashblockStateCache { + /// Returns the changeset cache. + pub fn get_changeset_cache(&self) -> ChangesetCache { + self.changeset_cache.clone() + } + /// Returns the current confirmed height. pub fn get_confirm_height(&self) -> u64 { self.inner.read().confirm_height } - /// Returns the current pending height, if any. - pub fn get_pending_height(&self) -> Option { - self.inner.read().pending_cache.as_ref().map(|p| p.get_height()) + /// Return the current canonical height, if any. + pub fn get_canon_height(&self) -> u64 { + self.inner.read().canon_info.0 + } + + /// Returns a clone of the current pending sequence, if any. + pub fn get_pending_sequence(&self) -> Option> { + self.inner.read().pending_cache.clone() } pub fn get_rpc_block_by_id(&self, block_id: Option) -> Option> { @@ -119,25 +132,27 @@ where self.inner.read().get_tx_info(tx_hash) } - /// Returns a cloned watch receiver for pending sequence updates. - /// Used by `eth_sendRawTransactionSync` to watch for sub-block preconfirmation. + /// Returns a cloned watch receiver for pending sequence state updates. pub fn subscribe_pending_sequence(&self) -> PendingSequenceRx { self.inner.read().subscribe_pending_sequence() } - /// Creates a `StateProviderBox` that overlays the flashblock execution state on top of the - /// canonical state for the given block ID. + /// Instantiates a `MemoryOverlayStateProvider` that overlays the flashblock + /// execution state on top of the canonical state for the given block ID. + /// + /// 1. Block number/hash - all block overlays in the cache up to that block. + /// 2. `Pending` - all block overlays in the flashblocks state cache, which + /// includes the current pending executed block state. + /// 3. `Latest` - all block overlays in the confirm cache up to the confirm + /// height. /// - /// For a specific block number/hash, returns all confirm cache blocks up to that height. - /// For `Pending`, it also includes the current pending executed block state. - /// For `Latest`, resolves to the confirm height. /// Returns `None` if the target block is not in the flashblocks cache. pub fn get_state_provider_by_id( &self, block_id: Option, canonical_state: StateProviderBox, ) -> Option<(StateProviderBox, SealedHeaderFor)> { - let mut guard = self.inner.write(); + let guard = self.inner.read(); let block = match block_id.unwrap_or(BlockId::Number(BlockNumberOrTag::Latest)) { BlockId::Number(id) => match id { BlockNumberOrTag::Pending => guard.get_pending_block(), @@ -149,27 +164,80 @@ where }? .block; let block_num = block.number(); - let in_memory = guard.get_state_provider_at_height(block_num, canonical_state)?; - Some((in_memory, block.clone_sealed_header())) + let in_memory = guard.get_executed_blocks_up_to_height(block_num); + drop(guard); + + let in_memory = match in_memory { + Ok(blocks) => blocks, + Err(e) => { + warn!(target: "flashblocks", "Failed to get flashblocks state provider: {e}. Flushing cache"); + self.inner.write().flush(); + None + } + }?; + Some(( + Box::new(MemoryOverlayStateProvider::new(canonical_state, in_memory)), + block.clone_sealed_header(), + )) } + /// Instantiates a `MemoryOverlayStateProvider` with all block overlays in + /// the flashblocks state cache, including the current pending executed + /// block state. pub fn get_pending_state_provider( &self, canonical_state: StateProviderBox, ) -> Option<(StateProviderBox, SealedHeaderFor)> { - let mut guard = self.inner.write(); + let guard = self.inner.read(); let block = guard.get_pending_block()?.block; let block_num = block.number(); - let in_memory = guard.get_state_provider_at_height(block_num, canonical_state)?; - Some((in_memory, block.clone_sealed_header())) + let in_memory = guard.get_executed_blocks_up_to_height(block_num); + drop(guard); + + let in_memory = match in_memory { + Ok(blocks) => blocks, + Err(e) => { + warn!(target: "flashblocks", "Failed to get flashblocks state provider: {e}. Flushing cache"); + self.inner.write().flush(); + None + } + }?; + Some(( + Box::new(MemoryOverlayStateProvider::new(canonical_state, in_memory)), + block.clone_sealed_header(), + )) + } + + /// Returns all available blocks for the given hash that lead back to the + /// canonical chain (from newest to oldest), the parent hash of the oldest + /// returned block, and the sealed header of the specified block hash. + /// + /// Returns `None` if the block for the given hash is not found. + pub fn get_overlay_data( + &self, + block_hash: &B256, + ) -> Option<(Vec>, SealedHeaderFor, B256)> { + let guard = self.inner.read(); + let block = guard.get_block_by_hash(&block_hash)?.block; + let block_num = block.number(); + let canon_hash = guard.get_canon_info().1; + let in_memory = guard.get_executed_blocks_up_to_height(block_num); + drop(guard); + + let in_memory = match in_memory { + Ok(blocks) => blocks, + Err(e) => { + warn!(target: "flashblocks", "Failed to get flashblocks state provider: {e}. Flushing cache"); + self.inner.write().flush(); + None + } + }?; + Some((in_memory, block.clone_sealed_header(), canon_hash)) } } // FlashblockStateCache state mutation interfaces. -impl FlashblockStateCache -where - N::Receipt: FlashblockCachedReceipt, -{ +impl FlashblockStateCache { /// Handles updating the latest pending state by the flashblocks rpc handle. /// /// This method detects when the flashblocks sequencer has advanced to the next @@ -199,17 +267,31 @@ where /// It also detects chainstate re-orgs (set with re-org arg flag) and flashblocks /// state cache pollution. By default once error is detected, we will automatically /// flush the flashblocks state cache. - pub fn handle_canonical_block(&self, block_number: u64, reorg: bool) { - self.inner.write().handle_canonical_block(block_number, reorg) + pub fn handle_canonical_block(&self, canon_info: (u64, B256), reorg: bool) { + debug!( + target: "flashblocks", + canonical_height = canon_info.0, + "Flashblocks state cache received canonical block" + ); + + // Evict trie changesets for blocks below the eviction threshold. + // Keep at least CHANGESET_CACHE_RETENTION_BLOCKS from the persisted tip, and also respect + // the finalized block if set. + let eviction_threshold = canon_info.0.saturating_sub(CHANGESET_CACHE_RETENTION_BLOCKS); + debug!( + target: "flashblocks", + canonical_height = canon_info.0, + eviction_threshold = eviction_threshold, + "Evicting changesets below threshold" + ); + self.changeset_cache.evict(eviction_threshold); + self.inner.write().handle_canonical_block(canon_info, reorg) } } /// Inner state of the flashblocks state cache. #[derive(Debug)] -struct FlashblockStateCacheInner -where - N::Receipt: FlashblockCachedReceipt, -{ +struct FlashblockStateCacheInner { /// The current in-progress pending flashblock sequence, if any. pending_cache: Option>, /// Cache of confirmed flashblock sequences ahead of the canonical chain. @@ -218,7 +300,7 @@ where /// is uninitialized, the confirm height is set to 0. confirm_height: u64, /// Highest confirmed block height in the canonical chainstate. - canon_height: u64, + canon_info: (u64, B256), /// Receiver of the most recent executed [`PendingSequence`] built from the latest /// flashblocks sequence. pending_sequence_rx: PendingSequenceRx, @@ -227,10 +309,7 @@ where pending_sequence_tx: watch::Sender>>, } -impl FlashblockStateCacheInner -where - N::Receipt: FlashblockCachedReceipt, -{ +impl FlashblockStateCacheInner { fn new() -> Self { let (tx, rx) = watch::channel(None); @@ -238,7 +317,7 @@ where pending_cache: None, confirm_cache: ConfirmCache::new(), confirm_height: 0, - canon_height: 0, + canon_info: (0, B256::ZERO), pending_sequence_rx: rx, pending_sequence_tx: tx, } @@ -295,11 +374,11 @@ where sequence.pending.receipts, )?; self.pending_cache = Some(pending_sequence.clone()); - self.pending_sequence_tx.send(Some(pending_sequence)); + let _ = self.pending_sequence_tx.send(Some(pending_sequence)); } else if pending_height == expected_height { // Replace the existing pending sequence self.pending_cache = Some(pending_sequence.clone()); - self.pending_sequence_tx.send(Some(pending_sequence)); + let _ = self.pending_sequence_tx.send(Some(pending_sequence)); } else { return Err(eyre::eyre!( "polluted state cache - not next consecutive pending height block" @@ -308,13 +387,13 @@ where Ok(()) } - fn handle_canonical_block(&mut self, canon_height: u64, reorg: bool) { + fn handle_canonical_block(&mut self, canon_info: (u64, B256), reorg: bool) { let pending_stale = - self.pending_cache.as_ref().is_some_and(|p| p.get_height() <= canon_height); + self.pending_cache.as_ref().is_some_and(|p| p.get_height() <= canon_info.0); if pending_stale || reorg { warn!( target: "flashblocks", - canonical_height = canon_height, + canonical_height = canon_info.0, cache_height = self.confirm_height, canonical_reorg = reorg, pending_stale = pending_stale, @@ -324,15 +403,16 @@ where } else { debug!( target: "flashblocks", - canonical_height = canon_height, + canonical_height = canon_info.0, cache_height = self.confirm_height, - "Flashblocks state cache received canonical block, flushing confirm cache up to canonical height" + "Evicting flashblocks state inner cache" ); - self.confirm_cache.flush_up_to_height(canon_height); + + self.confirm_cache.flush_up_to_height(canon_info.0); } // Update state heights - self.canon_height = canon_height; - self.confirm_height = self.confirm_height.max(canon_height); + self.canon_info = canon_info; + self.confirm_height = self.confirm_height.max(canon_info.0); } pub fn get_confirmed_block(&self) -> Option> { @@ -343,6 +423,10 @@ where self.pending_cache.as_ref().map(|p| p.get_block_and_receipts()) } + pub fn get_canon_info(&self) -> (u64, B256) { + self.canon_info + } + pub fn get_block_by_number(&self, num: u64) -> Option> { if let Some(pending_sequence) = self.pending_cache.as_ref() && pending_sequence.get_height() == num @@ -368,15 +452,47 @@ where .or_else(|| self.confirm_cache.get_tx_info(tx_hash)) } - /// Returns all `ExecutedBlock`s up to `target_height`. + /// Returns the ordered vector of `ExecutedBlock`s from the cache. + /// + /// # Safety of the overlay + /// The returned blocks used for state overlay is correct **if and only if** the + /// blocks form a contiguous chain from some height down to `canonical_height + 1` + /// (or `canonical_height` itself in the redundant-but-safe race case). + /// + /// **Safe (redundant overlap)**: Due to a race between canonical commit and confirm + /// cache flush, the lowest overlay block may be equal to or lower than the canonical + /// height. + /// + /// For example, canonical is at height `x` and the overlay contains `[x+2, x+1, x]`. + /// This is safe the overlay blocks are checked first (newest-to-oldest). The state + /// at height `x` contains changes identical to what canonical already applied, so + /// the result is correct regardless of which source resolves the query. + /// + /// **State inconsistency (gap in overlay)**: If an intermediate block is missing, + /// for example overlay has `[x+2, x]` but not `x+1`, then any account modified only + /// at height `x+1` would be invisible — the query falls through to canonical which + /// returns stale incorrect state. + /// + /// **State inconsistency (canonical too far behind)**: If the canonical height is + /// more than one block below the lowest overlay block. For example, canonical at + /// `x-2`, lowest overlay at `x`, then changes at height `x-1` are not covered by + /// either source. + /// + /// Both failure modes reduce to: every height between `canonical_height + 1` and + /// the target must be present in the overlay. This invariant is naturally maintained + /// by `handle_confirmed_block` (rejects non-consecutive heights) and the pending + /// block always being `confirm_height + 1`. + /// + /// On validation failure (non-contiguous overlay or gap to canonical), the cache is + /// flushed and `None` is returned. fn get_executed_blocks_up_to_height( &self, target_height: u64, ) -> eyre::Result>>> { if self.confirm_height == 0 - || self.canon_height == 0 + || self.canon_info.0 == 0 || target_height > self.confirm_height + 1 - || target_height <= self.canon_height + || target_height <= self.canon_info.0 { // Cache not initialized or target height is outside the cache range return Ok(None); @@ -389,7 +505,7 @@ where } blocks.extend( self.confirm_cache - .get_executed_blocks_up_to_height(target_height, self.canon_height)?, + .get_executed_blocks_up_to_height(target_height, self.canon_info.0)?, ); Ok(Some(blocks)) } @@ -397,53 +513,4 @@ where pub fn subscribe_pending_sequence(&self) -> PendingSequenceRx { self.pending_sequence_rx.clone() } - - /// Instantiates a `MemoryOverlayStateProvider` by getting the ordered `ExecutedBlock`s - /// from the cache, and overlaying them on top of the canonical state provider. - /// - /// # Safety of the overlay - /// The returned blocks are meant to be layered on top of a canonical `StateProviderBox` - /// via `MemoryOverlayStateProvider`. This is correct **if and only if** the overlay - /// blocks form a contiguous chain from some height down to `canonical_height + 1` - /// (or `canonical_height` itself in the redundant-but-safe race case). - /// - /// **Safe (redundant overlap)**: Due to a race between canonical commit and confirm - /// cache flush, the lowest overlay block may equal the canonical height. For example, - /// canonical is at height `x` and the overlay contains `[x+2, x+1, x]`. This is safe - /// because `MemoryOverlayStateProvider` checks overlay blocks first (newest-to-oldest) - /// — the duplicate `BundleState` at height `x` contains changes identical to what - /// canonical already applied, so the result is correct regardless of which source - /// resolves the query. - /// - /// **State inconsistency (gap in overlay)**: If an intermediate block is missing (e.g. - /// overlay has `[x+2, x]` but not `x+1`), any account modified only at height `x+1` - /// would be invisible — the query falls through to canonical, returning stale state. - /// - /// **State inconsistency (canonical too far behind)**: If the canonical height is more - /// than one block below the lowest overlay block (e.g. canonical at `x-2`, lowest overlay - /// at `x`), changes at height `x-1` are not covered by either source. - /// - /// Both failure modes reduce to: every height between `canonical_height + 1` and the - /// target must be present in the overlay. This invariant is naturally maintained by - /// `handle_confirmed_block` (rejects non-consecutive heights) and the pending block always - /// being `confirm_height + 1`. - /// - /// On validation failure (non-contiguous overlay or gap to canonical), the cache is - /// flushed and `None` is returned. - pub fn get_state_provider_at_height( - &mut self, - height: u64, - canonical_state: StateProviderBox, - ) -> Option { - let in_memory = match self.get_executed_blocks_up_to_height(height) { - Ok(Some(blocks)) => blocks, - Ok(None) => return None, - Err(e) => { - warn!(target: "flashblocks", "Failed to get flashblocks state provider: {e}. Flushing cache"); - self.flush(); - return None; - } - }; - Some(Box::new(MemoryOverlayStateProvider::new(canonical_state, in_memory))) - } } diff --git a/crates/flashblocks/src/cache/pending.rs b/crates/flashblocks/src/cache/pending.rs index 27d49d00..15ef2c1d 100644 --- a/crates/flashblocks/src/cache/pending.rs +++ b/crates/flashblocks/src/cache/pending.rs @@ -1,47 +1,30 @@ -use crate::{cache::CachedTxInfo, FlashblockCachedReceipt}; +use crate::{cache::CachedTxInfo, execution::PrefixExecutionMeta}; use derive_more::Deref; use std::collections::HashMap; use alloy_consensus::BlockHeader; use alloy_primitives::{TxHash, B256}; use reth_primitives_traits::NodePrimitives; -use reth_revm::cached::CachedReads; use reth_rpc_eth_types::{block::BlockAndReceipts, PendingBlock}; -/// The pending flashblocks sequence built with all received OpFlashblockPayload +/// The pending flashblocks sequence built with all received `OpFlashblockPayload` /// alongside the metadata for the last added flashblock. #[derive(Debug, Clone, Deref)] -pub struct PendingSequence -where - N::Receipt: FlashblockCachedReceipt, -{ +pub struct PendingSequence { /// Locally built full pending block of the latest flashblocks sequence. #[deref] pub pending: PendingBlock, /// Transaction index: tx hash → cached tx info for O(1) tx/receipt lookups. pub tx_index: HashMap>, - /// Cached reads from execution for reuse. - pub cached_reads: CachedReads, /// The current block hash of the latest flashblocks sequence. pub block_hash: B256, /// Parent hash of the built block (may be non-canonical or canonical). pub parent_hash: B256, - /// The last flashblock index of the latest flashblocks sequence. - pub last_flashblock_index: u64, - /// Cached number of transactions covered by the pending sequence execution. - cached_tx_count: usize, - /// Cached receipts for the prefix. - pub cached_receipts: Vec, - /// Total gas used by the pending sequence. - pub cached_gas_used: u64, - /// Total blob/DA gas used by the pending sequence. - pub cached_blob_gas_used: u64, + /// Prefix execution metadata for incremental builds. + pub prefix_execution_meta: PrefixExecutionMeta, } -impl PendingSequence -where - N::Receipt: FlashblockCachedReceipt, -{ +impl PendingSequence { pub fn get_hash(&self) -> B256 { self.block_hash } @@ -85,14 +68,9 @@ mod tests { PendingSequence { pending: pending_block, tx_index: HashMap::new(), - cached_reads: Default::default(), block_hash, parent_hash, - last_flashblock_index: 0, - cached_tx_count: 0, - cached_receipts: vec![], - cached_gas_used: 0, - cached_blob_gas_used: 0, + prefix_execution_meta: Default::default(), } } @@ -123,14 +101,9 @@ mod tests { PendingSequence { pending: pending_block, tx_index, - cached_reads: Default::default(), block_hash, parent_hash, - last_flashblock_index: 0, - cached_tx_count: 0, - cached_receipts: vec![], - cached_gas_used: 0, - cached_blob_gas_used: 0, + prefix_execution_meta: Default::default(), } } diff --git a/crates/flashblocks/src/execution/assemble.rs b/crates/flashblocks/src/execution/assemble.rs new file mode 100644 index 00000000..b426aa34 --- /dev/null +++ b/crates/flashblocks/src/execution/assemble.rs @@ -0,0 +1,118 @@ +use alloy_consensus::{ + constants::EMPTY_WITHDRAWALS, Block, BlockBody, Header, EMPTY_OMMER_ROOT_HASH, +}; +use alloy_eips::{eip7685::EMPTY_REQUESTS_HASH, merge::BEACON_NONCE}; +use alloy_primitives::{Bloom, B256}; +use op_alloy_rpc_types_engine::OpFlashblockPayloadBase; +use reth_errors::BlockExecutionError; +use reth_optimism_consensus::isthmus; +use reth_optimism_forks::OpHardforks; +use reth_provider::StateProvider; +use reth_revm::db::BundleState; + +/// Input for [`assemble_flashblock`] bundling pre-computed roots and execution data. +pub(crate) struct FlashblockAssemblerInput<'a, T> { + /// The flashblock base payload attributes. + pub base: &'a OpFlashblockPayloadBase, + /// Pre-computed state root. + pub state_root: B256, + /// Pre-computed transaction root. + pub transactions_root: B256, + /// Pre-computed receipts root. + pub receipts_root: B256, + /// Pre-computed logs bloom. + pub logs_bloom: Bloom, + /// Total gas used by the block. + pub gas_used: u64, + /// Total blob gas used by the block. + pub blob_gas_used: u64, + /// Bundle state from execution (for isthmus `withdrawals_root` computation). + pub bundle_state: &'a BundleState, + /// State provider for isthmus `withdrawals_root` computation. + pub state_provider: &'a dyn StateProvider, + /// Signed transactions for the block body. + pub transactions: Vec, +} + +/// Assembles a flashblock ([`Block`]) from pre-computed roots and execution output. +/// +/// Mirrors `OpBlockAssembler::assemble_block()` for hardfork-dependent header fields +/// (`withdrawals_root`, `requests_hash`, `blob_gas_used`, `excess_blob_gas`) but uses +/// pre-computed `transactions_root`, `receipts_root`, `logs_bloom`, and `state_root` +/// directly instead of recomputing them. +pub(crate) fn assemble_flashblock( + chain_spec: &ChainSpec, + input: FlashblockAssemblerInput<'_, T>, +) -> Result, BlockExecutionError> +where + ChainSpec: OpHardforks, +{ + let FlashblockAssemblerInput { + base, + state_root, + transactions_root, + receipts_root, + logs_bloom, + gas_used, + blob_gas_used, + bundle_state, + state_provider, + transactions, + } = input; + + let timestamp = base.timestamp; + let mut requests_hash = None; + + let withdrawals_root = if chain_spec.is_isthmus_active_at_timestamp(timestamp) { + requests_hash = Some(EMPTY_REQUESTS_HASH); + Some( + isthmus::withdrawals_root(bundle_state, state_provider) + .map_err(BlockExecutionError::other)?, + ) + } else if chain_spec.is_canyon_active_at_timestamp(timestamp) { + Some(EMPTY_WITHDRAWALS) + } else { + None + }; + + let (excess_blob_gas, blob_gas_used) = if chain_spec.is_jovian_active_at_timestamp(timestamp) { + (Some(0), Some(blob_gas_used)) + } else if chain_spec.is_ecotone_active_at_timestamp(timestamp) { + (Some(0), Some(0)) + } else { + (None, None) + }; + + let header = Header { + parent_hash: base.parent_hash, + ommers_hash: EMPTY_OMMER_ROOT_HASH, + beneficiary: base.fee_recipient, + state_root, + transactions_root, + receipts_root, + withdrawals_root, + logs_bloom, + timestamp, + mix_hash: base.prev_randao, + nonce: BEACON_NONCE.into(), + base_fee_per_gas: Some(base.base_fee_per_gas.saturating_to()), + number: base.block_number, + gas_limit: base.gas_limit, + difficulty: Default::default(), + gas_used, + extra_data: base.extra_data.clone(), + parent_beacon_block_root: Some(base.parent_beacon_block_root), + blob_gas_used, + excess_blob_gas, + requests_hash, + }; + + Ok(Block::new( + header, + BlockBody { + transactions, + ommers: Default::default(), + withdrawals: chain_spec.is_canyon_active_at_timestamp(timestamp).then(Default::default), + }, + )) +} diff --git a/crates/flashblocks/src/execution/cache.rs b/crates/flashblocks/src/execution/cache.rs deleted file mode 100644 index 4c2efa92..00000000 --- a/crates/flashblocks/src/execution/cache.rs +++ /dev/null @@ -1,675 +0,0 @@ -//! Execution caching for flashblock building. -//! -//! When flashblocks arrive incrementally, each new flashblock triggers a rebuild of pending -//! state from all transactions in the sequence. To ensure that the incoming flashblocks -//! are incrementally re-built, from their sequence, the execution cache stores the cumulative -//! bundle state from previous executions. This ensures that states are not re-read from disk -//! for accounts/storage that were already loaded in previous builds. -//! -//! # Approach -//! -//! This module caches the cumulative bundle state from previous executions. When the next -//! flashblock arrives, if its transaction list is a continuation of the cached list, the -//! cached bundle can be used as a **prestate** for the State builder. This avoids redundant -//! disk reads for accounts/storage that were already modified. -//! -//! The cache stores: -//! - Ordered list of executed transaction hashes (for prefix matching) -//! - Cumulative bundle state after all cached transactions (used as prestate) -//! - Cumulative receipts for all cached transactions (for future optimization) -//! - Block-level execution metadata for cached transactions (gas/requests) -//! -//! # Example -//! -//! ```text -//! Flashblock 0: txs [A, B] -//! -> Execute A, B from scratch (cold state reads) -//! -> Cache: txs=[A,B], bundle=state_after_AB -//! -//! Flashblock 1: txs [A, B, C] -//! -> Prefix [A, B] matches cache -//! -> Use cached bundle as prestate (warm state) -//! -> Execute A, B, C (A, B hit prestate cache, faster) -//! -> Cache: txs=[A,B,C], bundle=state_after_ABC -//! -//! Flashblock 2 (reorg): txs [A, D, E] -//! -> Prefix [A] matches, but tx[1]=D != B -//! -> Cached prestate may be partially useful, but diverges -//! -> Execute A, D, E -//! ``` - -use alloy_eips::eip7685::Requests; -use alloy_primitives::B256; -use reth_primitives_traits::NodePrimitives; -use reth_revm::db::BundleState; - -/// Cached block-level execution metadata for the stored transaction prefix. -#[derive(Debug, Clone, Default, PartialEq, Eq)] -pub(crate) struct CachedExecutionMeta { - /// EIP-7685 requests emitted while executing the cached prefix. - pub requests: Requests, - /// Total gas used by the cached prefix. - pub gas_used: u64, - /// Total blob/DA gas used by the cached prefix. - pub blob_gas_used: u64, -} - -/// Resumable cached state plus execution metadata for the cached prefix. -pub(crate) type ResumableState<'a, N> = - (&'a BundleState, &'a [::Receipt], &'a Requests, u64, u64, usize); - -/// Cache of transaction execution results for a single block. -/// -/// Stores cumulative execution state that can be used as a prestate to avoid -/// redundant disk reads when re-executing transactions. The cached bundle provides -/// warm state for accounts/storage already loaded, improving execution performance. -/// -/// **Note**: This cache does NOT skip transaction execution - all transactions must -/// still be executed to populate the block body. The cache only optimizes state reads. -/// -/// The cache is invalidated when: -/// - A new block starts (different block number) -/// - Parent hash changes for parent-scoped lookups -/// - A reorg is detected (transaction list diverges from cached prefix) -/// - Explicitly cleared -#[derive(Debug)] -pub struct TransactionCache { - /// Block number this cache is valid for. - block_number: u64, - /// Parent hash this cache is valid for. - cached_parent_hash: Option, - /// Ordered list of transaction hashes that have been executed. - executed_tx_hashes: Vec, - /// Cumulative bundle state after executing all cached transactions. - cumulative_bundle: BundleState, - /// Receipts for all cached transactions, in execution order. - receipts: Vec, - /// Cached block-level execution metadata. - execution_meta: CachedExecutionMeta, -} - -impl Default for TransactionCache { - fn default() -> Self { - Self::new() - } -} - -impl TransactionCache { - /// Creates a new empty transaction cache. - pub fn new() -> Self { - Self { - block_number: 0, - cached_parent_hash: None, - executed_tx_hashes: Vec::new(), - cumulative_bundle: BundleState::default(), - receipts: Vec::new(), - execution_meta: CachedExecutionMeta::default(), - } - } - - /// Creates a new cache for a specific block number. - pub fn for_block(block_number: u64) -> Self { - Self { block_number, ..Self::new() } - } - - /// Returns the block number this cache is valid for. - pub const fn block_number(&self) -> u64 { - self.block_number - } - - /// Returns the parent hash this cache is valid for, if tracked. - pub const fn parent_hash(&self) -> Option { - self.cached_parent_hash - } - - /// Checks if this cache is valid for the given block number. - pub const fn is_valid_for_block(&self, block_number: u64) -> bool { - self.block_number == block_number - } - - /// Checks if this cache is valid for the given block number and parent hash. - pub fn is_valid_for_block_parent(&self, block_number: u64, parent_hash: B256) -> bool { - self.block_number == block_number && self.cached_parent_hash == Some(parent_hash) - } - - /// Returns the number of cached transactions. - pub const fn len(&self) -> usize { - self.executed_tx_hashes.len() - } - - /// Returns true if the cache is empty. - pub const fn is_empty(&self) -> bool { - self.executed_tx_hashes.is_empty() - } - - /// Returns the cached transaction hashes. - pub fn executed_tx_hashes(&self) -> &[B256] { - &self.executed_tx_hashes - } - - /// Returns the cached receipts. - pub fn receipts(&self) -> &[N::Receipt] { - &self.receipts - } - - /// Returns the cumulative bundle state. - pub const fn bundle(&self) -> &BundleState { - &self.cumulative_bundle - } - - /// Clears the cache. - pub fn clear(&mut self) { - self.executed_tx_hashes.clear(); - self.cumulative_bundle = BundleState::default(); - self.receipts.clear(); - self.execution_meta = CachedExecutionMeta::default(); - self.block_number = 0; - self.cached_parent_hash = None; - } - - /// Updates the cache for a new block, clearing if the block number changed. - /// - /// Returns true if the cache was cleared. - pub fn update_for_block(&mut self, block_number: u64) -> bool { - if self.block_number == block_number { - false - } else { - self.clear(); - self.block_number = block_number; - true - } - } - - /// Computes the length of the matching prefix between cached transactions - /// and the provided transaction hashes. - /// - /// Returns the number of transactions that can be skipped because they - /// match the cached execution results. - pub fn matching_prefix_len(&self, tx_hashes: &[B256]) -> usize { - self.executed_tx_hashes - .iter() - .zip(tx_hashes.iter()) - .take_while(|(cached, incoming)| cached == incoming) - .count() - } - - /// Returns cached state for resuming execution if the incoming transactions have a - /// matching prefix with the cache. - /// - /// Returns `Some((bundle, receipts, requests, gas_used, blob_gas_used, skip_count))` - /// if there's a non-empty matching prefix, and the full cache matches the incoming - /// prefix, where: - /// - `bundle` is the cumulative state after the matching prefix - /// - `receipts` is the receipts for the matching prefix - /// - `skip_count` is the number of transactions to skip - /// - /// Returns `None` if: - /// - The cache is empty - /// - No prefix matches (first transaction differs) - /// - Block number doesn't match - pub(crate) fn get_resumable_state( - &self, - block_number: u64, - tx_hashes: &[B256], - ) -> Option> { - if !self.is_valid_for_block(block_number) || self.is_empty() { - return None; - } - - let prefix_len = self.matching_prefix_len(tx_hashes); - if prefix_len == 0 { - return None; - } - - // Only return state if the full cache matches (partial prefix would need - // intermediate state snapshots, which we don't currently store). - // Partial match means incoming txs diverge from cache, need to re-execute. - (prefix_len == self.executed_tx_hashes.len()).then_some(( - &self.cumulative_bundle, - self.receipts.as_slice(), - &self.execution_meta.requests, - self.execution_meta.gas_used, - self.execution_meta.blob_gas_used, - prefix_len, - )) - } - - /// Returns cached state for resuming execution if the incoming transactions have a - /// matching prefix with the cache and the parent hash matches. - /// - /// Returns `Some((bundle, receipts, requests, gas_used, blob_gas_used, skip_count))` - /// if there's a non-empty matching prefix, where the full cache matches the incoming prefix, and the - /// `(block_number, parent_hash)` tuple matches the cached scope. - pub(crate) fn get_resumable_state_for_parent( - &self, - block_number: u64, - parent_hash: B256, - tx_hashes: &[B256], - ) -> Option> { - if !self.is_valid_for_block_parent(block_number, parent_hash) || self.is_empty() { - return None; - } - - let prefix_len = self.matching_prefix_len(tx_hashes); - if prefix_len == 0 { - return None; - } - - (prefix_len == self.executed_tx_hashes.len()).then_some(( - &self.cumulative_bundle, - self.receipts.as_slice(), - &self.execution_meta.requests, - self.execution_meta.gas_used, - self.execution_meta.blob_gas_used, - prefix_len, - )) - } - - /// Updates the cache with new execution results. - /// - /// This should be called after executing a flashblock. The provided bundle - /// and receipts should represent the cumulative state after all transactions. - pub fn update( - &mut self, - block_number: u64, - tx_hashes: Vec, - bundle: BundleState, - receipts: Vec, - ) { - self.update_with_execution_meta( - block_number, - tx_hashes, - bundle, - receipts, - CachedExecutionMeta::default(), - ); - } - - /// Updates the cache with new execution results and block-level metadata. - pub(crate) fn update_with_execution_meta( - &mut self, - block_number: u64, - tx_hashes: Vec, - bundle: BundleState, - receipts: Vec, - execution_meta: CachedExecutionMeta, - ) { - self.block_number = block_number; - self.cached_parent_hash = None; - self.executed_tx_hashes = tx_hashes; - self.cumulative_bundle = bundle; - self.receipts = receipts; - self.execution_meta = execution_meta; - } - - /// Updates the cache with new execution results and block-level metadata, scoped to the - /// provided parent hash. - pub(crate) fn update_with_execution_meta_for_parent( - &mut self, - block_number: u64, - parent_hash: B256, - tx_hashes: Vec, - bundle: BundleState, - receipts: Vec, - execution_meta: CachedExecutionMeta, - ) { - self.block_number = block_number; - self.cached_parent_hash = Some(parent_hash); - self.executed_tx_hashes = tx_hashes; - self.cumulative_bundle = bundle; - self.receipts = receipts; - self.execution_meta = execution_meta; - } -} - -#[cfg(test)] -mod tests { - use super::*; - use reth_optimism_primitives::OpPrimitives; - - type TestCache = TransactionCache; - - #[test] - fn test_cache_block_validation() { - let mut cache = TestCache::for_block(100); - assert!(cache.is_valid_for_block(100)); - assert!(!cache.is_valid_for_block(101)); - assert!(!cache.is_valid_for_block_parent(100, B256::repeat_byte(0x11))); - - // Update for same block doesn't clear - assert!(!cache.update_for_block(100)); - - // Update for different block clears - assert!(cache.update_for_block(101)); - assert!(cache.is_valid_for_block(101)); - assert!(cache.parent_hash().is_none()); - } - - #[test] - fn test_cache_clear() { - let mut cache = TestCache::for_block(100); - assert_eq!(cache.block_number(), 100); - - cache.clear(); - assert_eq!(cache.block_number(), 0); - assert!(cache.is_empty()); - } - - #[test] - fn test_matching_prefix_len() { - let mut cache = TestCache::for_block(100); - - let tx_a = B256::repeat_byte(0xAA); - let tx_b = B256::repeat_byte(0xBB); - let tx_c = B256::repeat_byte(0xCC); - let tx_d = B256::repeat_byte(0xDD); - - // Update cache with [A, B] - cache.update(100, vec![tx_a, tx_b], BundleState::default(), vec![]); - - // Full match - assert_eq!(cache.matching_prefix_len(&[tx_a, tx_b]), 2); - - // Continuation - assert_eq!(cache.matching_prefix_len(&[tx_a, tx_b, tx_c]), 2); - - // Partial match (reorg at position 1) - assert_eq!(cache.matching_prefix_len(&[tx_a, tx_d, tx_c]), 1); - - // No match (reorg at position 0) - assert_eq!(cache.matching_prefix_len(&[tx_d, tx_b, tx_c]), 0); - - // Empty incoming - assert_eq!(cache.matching_prefix_len(&[]), 0); - } - - #[test] - fn test_get_resumable_state() { - let mut cache = TestCache::for_block(100); - - let tx_a = B256::repeat_byte(0xAA); - let tx_b = B256::repeat_byte(0xBB); - let tx_c = B256::repeat_byte(0xCC); - - // Empty cache returns None - assert!(cache.get_resumable_state(100, &[tx_a, tx_b]).is_none()); - - // Update cache with [A, B] - cache.update(100, vec![tx_a, tx_b], BundleState::default(), vec![]); - - // Wrong block number returns None - assert!(cache.get_resumable_state(101, &[tx_a, tx_b]).is_none()); - - // Exact match returns state - let result = cache.get_resumable_state(100, &[tx_a, tx_b]); - assert!(result.is_some()); - let (_, _, _, _, _, skip) = result.unwrap(); - assert_eq!(skip, 2); - - // Continuation returns state (can skip cached txs) - let result = cache.get_resumable_state(100, &[tx_a, tx_b, tx_c]); - assert!(result.is_some()); - let (_, _, _, _, _, skip) = result.unwrap(); - assert_eq!(skip, 2); - - // Partial match (reorg) returns None - can't use partial cache - assert!(cache.get_resumable_state(100, &[tx_a, tx_c]).is_none()); - } - - // ==================== E2E Cache Reuse Scenario Tests ==================== - - /// Tests the complete E2E cache scenario: fb0 [A,B] → fb1 [A,B,C] - /// Verifies that cached bundle can be used as prestate for the continuation. - #[test] - fn test_e2e_cache_reuse_continuation_scenario() { - let mut cache = TestCache::new(); - - let tx_a = B256::repeat_byte(0xAA); - let tx_b = B256::repeat_byte(0xBB); - let tx_c = B256::repeat_byte(0xCC); - - // Simulate fb0: execute [A, B] from scratch - let fb0_txs = vec![tx_a, tx_b]; - assert!(cache.get_resumable_state(100, &fb0_txs).is_none()); - - // After fb0 execution, update cache - cache.update(100, fb0_txs, BundleState::default(), vec![]); - assert_eq!(cache.len(), 2); - - // Simulate fb1: [A, B, C] - should resume from cached state - let fb1_txs = vec![tx_a, tx_b, tx_c]; - let result = cache.get_resumable_state(100, &fb1_txs); - assert!(result.is_some()); - let (bundle, receipts, _, _, _, skip) = result.unwrap(); - - // skip=2 indicates 2 txs are covered by cached state (for logging) - // Note: All transactions are still executed, skip is informational only - assert_eq!(skip, 2); - // Bundle is used as prestate to warm the State builder - assert!(bundle.state.is_empty()); // Default bundle is empty in test - assert!(receipts.is_empty()); // No receipts in this test - - // After fb1 execution, update cache with full list - cache.update(100, fb1_txs, BundleState::default(), vec![]); - assert_eq!(cache.len(), 3); - } - - /// Tests reorg scenario: fb0 [A, B] → fb1 [A, D, E] - /// Verifies that divergent tx list invalidates cache. - #[test] - fn test_e2e_cache_reorg_scenario() { - let mut cache = TestCache::new(); - - let tx_a = B256::repeat_byte(0xAA); - let tx_b = B256::repeat_byte(0xBB); - let tx_d = B256::repeat_byte(0xDD); - let tx_e = B256::repeat_byte(0xEE); - - // fb0: execute [A, B] - cache.update(100, vec![tx_a, tx_b], BundleState::default(), vec![]); - - // fb1 (reorg): [A, D, E] - tx[1] diverges, cannot resume - let fb1_txs = vec![tx_a, tx_d, tx_e]; - let result = cache.get_resumable_state(100, &fb1_txs); - assert!(result.is_none()); // Partial match means we can't use cache - } - - /// Tests multi-flashblock progression within same block: - /// fb0 [A] → fb1 [A,B] → fb2 [A,B,C] - /// - /// Each flashblock can use the previous bundle as prestate for warm state reads. - /// Note: All transactions are still executed; skip count is for logging only. - #[test] - fn test_e2e_multi_flashblock_progression() { - let mut cache = TestCache::new(); - - let tx_a = B256::repeat_byte(0xAA); - let tx_b = B256::repeat_byte(0xBB); - let tx_c = B256::repeat_byte(0xCC); - - // fb0: [A] - cache.update(100, vec![tx_a], BundleState::default(), vec![]); - assert_eq!(cache.len(), 1); - - // fb1: [A, B] - cached state covers [A] (skip=1 for logging) - let fb1_txs = vec![tx_a, tx_b]; - let result = cache.get_resumable_state(100, &fb1_txs); - assert!(result.is_some()); - assert_eq!(result.unwrap().4, 1); // 1 tx covered by cache - - cache.update(100, fb1_txs, BundleState::default(), vec![]); - assert_eq!(cache.len(), 2); - - // fb2: [A, B, C] - cached state covers [A, B] (skip=2 for logging) - let fb2_txs = vec![tx_a, tx_b, tx_c]; - let result = cache.get_resumable_state(100, &fb2_txs); - assert!(result.is_some()); - assert_eq!(result.unwrap().5, 2); // 2 txs covered by cache - - cache.update(100, fb2_txs, BundleState::default(), vec![]); - assert_eq!(cache.len(), 3); - } - - /// Tests that cache is invalidated on block number change. - #[test] - fn test_e2e_block_transition_clears_cache() { - let mut cache = TestCache::new(); - - let tx_a = B256::repeat_byte(0xAA); - let tx_b = B256::repeat_byte(0xBB); - - // Block 100: cache [A, B] - cache.update(100, vec![tx_a, tx_b], BundleState::default(), vec![]); - assert_eq!(cache.len(), 2); - - // Block 101: same txs shouldn't resume (different block) - let result = cache.get_resumable_state(101, &[tx_a, tx_b]); - assert!(result.is_none()); - - // Explicit block update clears cache - cache.update_for_block(101); - assert!(cache.is_empty()); - } - - /// Tests cache behavior with empty transaction list. - #[test] - fn test_cache_empty_transactions() { - let mut cache = TestCache::new(); - - // Empty flashblock (only system tx, no user txs) - cache.update(100, vec![], BundleState::default(), vec![]); - assert!(cache.is_empty()); - - // Can't resume from empty cache - let tx_a = B256::repeat_byte(0xAA); - assert!(cache.get_resumable_state(100, &[tx_a]).is_none()); - } - - /// Documents the semantics of `skip_count`. - /// - /// A resumable state is only returned when the incoming transaction list fully extends the - /// cached list. In that case, `skip_count` is the number of prefix transactions covered by - /// cached execution output. - #[test] - fn test_skip_count_matches_cached_prefix_len() { - let mut cache = TestCache::new(); - - let tx_a = B256::repeat_byte(0xAA); - let tx_b = B256::repeat_byte(0xBB); - let tx_c = B256::repeat_byte(0xCC); - - // Cache state after executing [A, B] - cache.update(100, vec![tx_a, tx_b], BundleState::default(), vec![]); - - // get_resumable_state returns skip=2 for prefix [A, B] - let result = cache.get_resumable_state(100, &[tx_a, tx_b, tx_c]); - assert!(result.is_some()); - let (bundle, _receipts, _, _, _, skip_count) = result.unwrap(); - - // skip_count indicates cached prefix length - assert_eq!(skip_count, 2); - - // The bundle is the important part - used as resumable prestate. - assert!(bundle.state.is_empty()); // Default in test, real one has state - } - - /// Tests that receipts are properly cached and returned. - #[test] - fn test_cache_preserves_receipts() { - use op_alloy_consensus::OpReceipt; - use reth_optimism_primitives::OpPrimitives; - - let mut cache: TransactionCache = TransactionCache::new(); - - let tx_a = B256::repeat_byte(0xAA); - let tx_b = B256::repeat_byte(0xBB); - - // Create mock receipts - let receipt_a = OpReceipt::Legacy(alloy_consensus::Receipt { - status: alloy_consensus::Eip658Value::Eip658(true), - cumulative_gas_used: 21000, - logs: vec![], - }); - let receipt_b = OpReceipt::Legacy(alloy_consensus::Receipt { - status: alloy_consensus::Eip658Value::Eip658(true), - cumulative_gas_used: 42000, - logs: vec![], - }); - - cache.update(100, vec![tx_a, tx_b], BundleState::default(), vec![receipt_a, receipt_b]); - - // Verify receipts are preserved - assert_eq!(cache.receipts().len(), 2); - - // On resumable state, receipts are returned - let tx_c = B256::repeat_byte(0xCC); - let result = cache.get_resumable_state(100, &[tx_a, tx_b, tx_c]); - assert!(result.is_some()); - let (_, receipts, _, _, _, _) = result.unwrap(); - assert_eq!(receipts.len(), 2); - } - - #[test] - fn test_cache_preserves_execution_meta() { - let mut cache = TestCache::new(); - - let tx_a = B256::repeat_byte(0xAA); - let tx_b = B256::repeat_byte(0xBB); - let tx_c = B256::repeat_byte(0xCC); - - let mut requests = Requests::default(); - requests.push_request_with_type(0x01, [0xAA, 0xBB]); - - cache.update_with_execution_meta( - 100, - vec![tx_a, tx_b], - BundleState::default(), - vec![], - CachedExecutionMeta { - requests: requests.clone(), - gas_used: 42_000, - blob_gas_used: 123, - }, - ); - - let resumable = cache.get_resumable_state(100, &[tx_a, tx_b, tx_c]); - assert!(resumable.is_some()); - let (_, _, cached_requests, gas_used, blob_gas_used, skip_count) = resumable.unwrap(); - assert_eq!(skip_count, 2); - assert_eq!(gas_used, 42_000); - assert_eq!(blob_gas_used, 123); - assert_eq!(cached_requests, &requests); - } - - #[test] - fn test_cache_parent_scoping() { - let mut cache = TestCache::new(); - - let tx_a = B256::repeat_byte(0xAA); - let tx_b = B256::repeat_byte(0xBB); - let tx_c = B256::repeat_byte(0xCC); - let parent_a = B256::repeat_byte(0x11); - let parent_b = B256::repeat_byte(0x22); - - cache.update_with_execution_meta_for_parent( - 100, - parent_a, - vec![tx_a, tx_b], - BundleState::default(), - vec![], - CachedExecutionMeta { - requests: Requests::default(), - gas_used: 42_000, - blob_gas_used: 0, - }, - ); - - // Matching block + parent should hit. - let hit = cache.get_resumable_state_for_parent(100, parent_a, &[tx_a, tx_b, tx_c]); - assert!(hit.is_some()); - - // Same block but different parent should miss. - let miss = cache.get_resumable_state_for_parent(100, parent_b, &[tx_a, tx_b, tx_c]); - assert!(miss.is_none()); - } -} diff --git a/crates/flashblocks/src/execution/mod.rs b/crates/flashblocks/src/execution/mod.rs index 751b3fd3..39bcef36 100644 --- a/crates/flashblocks/src/execution/mod.rs +++ b/crates/flashblocks/src/execution/mod.rs @@ -1,5 +1,97 @@ -mod cache; -use cache::{CachedExecutionMeta, TransactionCache}; +pub(crate) mod assemble; +pub(crate) mod validator; -pub(crate) mod worker; -pub use worker::{BuildArgs, BuildResult, FlashblockCachedReceipt}; +use alloy_eips::eip4895::Withdrawal; +use op_alloy_rpc_types_engine::OpFlashblockPayloadBase; + +use reth_optimism_primitives::OpReceipt; +use reth_provider::{ + BlockNumReader, ChangeSetReader, DatabaseProviderFactory, PruneCheckpointReader, + StageCheckpointReader, StorageChangeSetReader, StorageSettingsCache, +}; +use reth_revm::cached::CachedReads; + +pub(crate) struct BuildArgs { + pub(crate) base: OpFlashblockPayloadBase, + pub(crate) transactions: I, + pub(crate) withdrawals: Vec, + pub(crate) start_flashblock_index: u64, + pub(crate) last_flashblock_index: u64, +} + +/// Cached prefix execution data used to resume canonical builds. +#[derive(Debug, Clone, Default)] +pub(crate) struct PrefixExecutionMeta { + /// Cached reads from execution for reuse. + pub cached_reads: CachedReads, + /// Number of leading transactions covered by cached execution. + pub(crate) cached_tx_count: usize, + /// Total gas used by the cached prefix. + pub(crate) gas_used: u64, + /// Total blob/DA gas used by the cached prefix. + pub(crate) blob_gas_used: u64, + /// The last flashblock index of the latest flashblocks sequence. + pub(crate) last_flashblock_index: u64, +} + +/// Strategy describing how to compute the state root. +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +enum StateRootStrategy { + /// Use the state root task (background sparse trie computation). + StateRootTask, + /// Run the parallel state root computation on the calling thread. + Parallel, + /// Fall back to synchronous computation via the state provider. + Synchronous, +} + +/// Receipt requirements for cache-resume flow. +pub(crate) trait FlashblockReceipt: Clone { + /// Adds `gas_offset` to each receipt's `cumulative_gas_used`. + fn add_cumulative_gas_offset(receipts: &mut [Self], gas_offset: u64); +} + +impl FlashblockReceipt for OpReceipt { + fn add_cumulative_gas_offset(receipts: &mut [Self], gas_offset: u64) { + if gas_offset == 0 { + return; + } + for receipt in receipts { + let inner = receipt.as_receipt_mut(); + inner.cumulative_gas_used = inner.cumulative_gas_used.saturating_add(gas_offset); + } + } +} + +/// Trait alias for the bounds required on a provider factory to create an +/// [`OverlayStateProviderFactory`] that supports parallel and serial state +/// root computation. +pub(crate) trait OverlayProviderFactory: + DatabaseProviderFactory< + Provider: StageCheckpointReader + + PruneCheckpointReader + + BlockNumReader + + ChangeSetReader + + StorageChangeSetReader + + StorageSettingsCache, + > + Clone + + Send + + Sync + + 'static +{ +} + +impl OverlayProviderFactory for T where + T: DatabaseProviderFactory< + Provider: StageCheckpointReader + + PruneCheckpointReader + + BlockNumReader + + ChangeSetReader + + StorageChangeSetReader + + StorageSettingsCache, + > + Clone + + Send + + Sync + + 'static +{ +} diff --git a/crates/flashblocks/src/execution/validator.rs b/crates/flashblocks/src/execution/validator.rs new file mode 100644 index 00000000..ce09fbd4 --- /dev/null +++ b/crates/flashblocks/src/execution/validator.rs @@ -0,0 +1,1041 @@ +use crate::{ + cache::{CachedTxInfo, FlashblockStateCache, PendingSequence}, + execution::{ + assemble::{assemble_flashblock, FlashblockAssemblerInput}, + BuildArgs, FlashblockReceipt, OverlayProviderFactory, PrefixExecutionMeta, + StateRootStrategy, + }, +}; +use std::{ + collections::HashMap, + convert::Infallible, + panic::{self, AssertUnwindSafe}, + sync::{mpsc::RecvTimeoutError, Arc}, + time::{Duration, Instant}, +}; +use tracing::*; + +use alloy_consensus::{proofs::calculate_transaction_root, BlockHeader}; +use alloy_eip7928::BlockAccessList; +use alloy_eips::eip2718::{Encodable2718, WithEncoded}; +use alloy_evm::block::ExecutableTxParts; +use alloy_primitives::{Address, B256}; +use op_alloy_rpc_types_engine::OpFlashblockPayloadBase; + +use reth_chain_state::{DeferredTrieData, ExecutedBlock, LazyOverlay}; +use reth_engine_primitives::TreeConfig; +use reth_engine_tree::tree::{ + payload_processor::{ + receipt_root_task::{IndexedReceipt, ReceiptRootTaskHandle}, + ExecutionEnv, PayloadProcessor, + }, + sparse_trie::StateRootComputeOutcome, + CachedStateProvider, PayloadHandle, StateProviderBuilder, +}; +use reth_errors::BlockExecutionError; +use reth_errors::RethError; +use reth_evm::{ + execute::{BlockExecutor, ExecutableTxFor}, + ConfigureEvm, Evm, TxEnvFor, +}; +use reth_execution_types::{BlockExecutionOutput, BlockExecutionResult}; +use reth_optimism_forks::OpHardforks; +use reth_primitives_traits::{ + transaction::TxHashRef, HeaderTy, NodePrimitives, Recovered, RecoveredBlock, SealedHeaderFor, +}; +use reth_provider::{ + providers::OverlayStateProviderFactory, BlockReader, DatabaseProviderROFactory, + HashedPostStateProvider, HeaderProvider, ProviderError, StateProvider, StateProviderFactory, + StateReader, +}; +use reth_revm::{ + cached::CachedReads, + database::StateProviderDatabase, + db::{states::bundle_state::BundleRetention, State}, +}; +use reth_rpc_eth_types::PendingBlock; +use reth_tasks::Runtime; +use reth_trie::{updates::TrieUpdates, HashedPostState, StateRoot}; +use reth_trie_parallel::root::{ParallelStateRoot, ParallelStateRootError}; + +/// Builds [`PendingSequence`]s from the accumulated flashblock transaction sequences. +/// Commits results directly to [`FlashblockStateCache`] via `handle_pending_sequence()`. +/// +/// The execution uses the Reth's [`PayloadProcessor`] for optimal execution and state +/// root calculation of flashlbocks sequence. All 3 state root computation strategies +/// are supported (synchronous, parrallel and state root task using sparse trie). +/// +/// - **Fresh (canonical parent)**: `StateProviderBuilder` with no overlay blocks. +/// - **Fresh (non-canonical parent)**: `StateProviderBuilder` with overlay blocks from +/// the flashblocks confirm/pending cache via `get_overlay_data()`. +/// - **Incremental (same height)**: Full re-execution via `execute_fresh()`. The warm +/// execution cache and `PreservedSparseTrie` from the previous sequence build offset +/// the cost of re-executing prefix transactions. +pub(crate) struct FlashblockSequenceValidator +where + EvmConfig: ConfigureEvm, + ChainSpec: OpHardforks, +{ + /// The flashblocks state cache containing the flashblocks state cache layer. + flashblocks_state: FlashblockStateCache, + /// Provider for database state access. + provider: Provider, + /// EVM configuration. + evm_config: EvmConfig, + /// Chain specification for hardfork checks. + chain_spec: Arc, + /// Configuration for the engine tree. + tree_config: TreeConfig, + /// Payload processor for state root computation. + payload_processor: PayloadProcessor, + /// Task runtime for spawning parallel work. + runtime: Runtime, +} + +impl + FlashblockSequenceValidator +where + N: NodePrimitives, + N::Receipt: FlashblockReceipt, + EvmConfig: ConfigureEvm + Unpin> + + 'static, + Provider: StateProviderFactory + + HeaderProvider
> + + OverlayProviderFactory + + BlockReader + + StateReader + + HashedPostStateProvider + + Unpin + + Clone, + ChainSpec: OpHardforks, +{ + pub(crate) fn new( + evm_config: EvmConfig, + provider: Provider, + chain_spec: Arc, + flashblocks_state: FlashblockStateCache, + runtime: Runtime, + tree_config: TreeConfig, + ) -> Self { + let payload_processor = PayloadProcessor::new( + runtime.clone(), + evm_config.clone(), + &tree_config, + Default::default(), + ); + Self { + flashblocks_state, + provider, + evm_config, + chain_spec, + tree_config, + payload_processor, + runtime, + } + } + + /// Executes the incoming flashblocks sequence transactions delta and commits the + /// result to the flashblocks state cache. + pub(crate) fn execute_sequence>>>( + &mut self, + args: BuildArgs, + ) -> eyre::Result<()> + where + N::SignedTx: Encodable2718, + N::Block: From>, + { + // Pre-validate incoming flashblocks sequence + let pending_sequence = self + .prevalidate_incoming_sequence(args.base.block_number, args.start_flashblock_index)?; + + let parent_hash = args.base.parent_hash; + let block_transactions: Vec<_> = args.transactions.into_iter().collect(); + let block_transaction_count = block_transactions.len(); + let transactions: Vec<_> = if let Some(ref seq) = pending_sequence { + block_transactions + .iter() + .skip(seq.prefix_execution_meta.cached_tx_count) + .cloned() + .collect() + } else { + block_transactions.clone() + }; + // Get state provider builder of parent hash + let (provider_builder, parent_header, overlay_data) = + self.state_provider_builder(parent_hash)?; + let mut state_provider = provider_builder.build()?; + + let attrs = args.base.clone().into(); + let evm_env = + self.evm_config.next_evm_env(&parent_header, &attrs).map_err(RethError::other)?; + + let execution_env = ExecutionEnv { + evm_env, + hash: B256::ZERO, + parent_hash, + parent_state_root: parent_header.state_root(), + transaction_count: transactions.len(), + withdrawals: Some(args.withdrawals), + }; + + // Plan the strategy used for state root computation. + let strategy = self.plan_state_root_computation(); + + debug!( + target: "flashblocks::validator", + ?strategy, + "Decided which state root algorithm to run" + ); + + // TODO: Extract the BAL once flashblocks BAL is supported + let bal = None; + + // Create lazy overlay from ancestors - this doesn't block, allowing execution to start + // before the trie data is ready. The overlay will be computed on first access. + let (lazy_overlay, anchor_hash) = self.get_parent_lazy_overlay(parent_hash); + + // Create overlay factory for payload processor (StateRootTask path needs it for + // multiproofs) + let overlay_factory = OverlayStateProviderFactory::new( + self.provider.clone(), + self.flashblocks_state.get_changeset_cache(), + ) + .with_block_hash(Some(anchor_hash)) + .with_lazy_overlay(lazy_overlay); + + // Spawn the appropriate processor based on strategy + let mut handle = self.spawn_payload_processor( + execution_env.clone(), + block_transactions.clone(), + provider_builder, + overlay_factory.clone(), + strategy, + bal, + )?; + + // Use cached state provider before executing, used in execution after prewarming threads + // complete + if let Some((caches, cache_metrics)) = handle.caches().zip(handle.cache_metrics()) { + state_provider = + Box::new(CachedStateProvider::new(state_provider, caches, cache_metrics)); + }; + + // Execute the block and handle any execution errors. + // The receipt root task is spawned before execution and receives receipts incrementally + // as transactions complete, allowing parallel computation during execution. + let (output, senders, receipt_root_rx, cached_reads) = self.execute_block( + state_provider.as_ref(), + execution_env, + &parent_header, + attrs, + transactions, + pending_sequence, + &mut handle, + )?; + + // After executing the block we can stop prewarming transactions + handle.stop_prewarming_execution(); + + // Create ExecutionOutcome early so we can terminate caching before validation and state + // root computation. Using Arc allows sharing with both the caching task and the deferred + // trie task without cloning the expensive BundleState. + let output = Arc::new(output); + + // Terminate caching task early since execution is complete and caching is no longer + // needed. This frees up resources while state root computation continues. + let valid_block_tx = handle.terminate_caching(Some(output.clone())); + + // Extract signed transactions for the block body before moving + // `block_transactions` into the tx root closure. + let body_transactions: Vec = + block_transactions.iter().map(|tx| tx.1.inner().clone()).collect(); + + // Spawn async tx root computation + let (result_tx, result_rx) = tokio::sync::oneshot::channel(); + self.payload_processor.executor().spawn_blocking(move || { + let txs: Vec<_> = block_transactions.iter().map(|tx| &tx.1).collect(); + let _ = result_tx.send(calculate_transaction_root(&txs)); + }); + + // Wait for the receipt root computation to complete. + let (receipts_root, logs_bloom) = { + debug!(target: "flashblocks::validator", "wait_receipt_root"); + receipt_root_rx + .blocking_recv() + .inspect_err(|_| { + tracing::error!( + target: "flashblocks::validator", + "Receipt root task dropped sender without result, receipt root calculation likely aborted" + ); + })? + }; + let transactions_root = result_rx.blocking_recv().inspect_err(|_| { + tracing::error!( + target: "flashblocks::validator", + "Transaction root task dropped sender without result, transaction root calculation likely aborted" + ); + })?; + + let root_time = Instant::now(); + let hashed_state = self.provider.hashed_post_state(&output.state); + let mut maybe_state_root = None; + match strategy { + StateRootStrategy::StateRootTask => { + debug!(target: "flashblocks::validator", "Using sparse trie state root algorithm"); + + let task_result = self.await_state_root_with_timeout( + &mut handle, + overlay_factory.clone(), + &hashed_state, + )?; + + match task_result { + Ok(StateRootComputeOutcome { state_root, trie_updates }) => { + let elapsed = root_time.elapsed(); + maybe_state_root = Some((state_root, trie_updates)); + info!(target: "flashblocks::validator", ?state_root, ?elapsed, "State root task finished"); + } + Err(error) => { + debug!(target: "flashblocks::validator", %error, "State root task failed"); + } + } + } + StateRootStrategy::Parallel => { + debug!(target: "flashblocks::validator", "Using parallel state root algorithm"); + match self.compute_state_root_parallel(overlay_factory.clone(), &hashed_state) { + Ok(result) => { + let elapsed = root_time.elapsed(); + info!( + target: "flashblocks::validator", + regular_state_root = ?result.0, + ?elapsed, + "Regular root task finished" + ); + maybe_state_root = Some((result.0, result.1)); + } + Err(error) => { + debug!(target: "flashblocks::validator", %error, "Parallel state root computation failed"); + } + } + } + StateRootStrategy::Synchronous => {} + } + + // Determine the state root. + // If the state root was computed in parallel, we use it. + // Otherwise, we fall back to computing it synchronously. + let (state_root, trie_output) = if let Some(maybe_state_root) = maybe_state_root { + maybe_state_root + } else { + // fallback is to compute the state root regularly in sync + warn!(target: "flashblocks::validator", "Failed to compute state root"); + let (root, updates) = + Self::compute_state_root_serial(overlay_factory.clone(), &hashed_state)?; + (root, updates) + }; + + // Capture execution metrics before `output` is moved into the deferred trie task. + let prefix_gas_used = output.result.gas_used; + let prefix_blob_gas_used = output.result.blob_gas_used; + + // Assemble the block using pre-computed roots (avoids recomputation). + let block = assemble_flashblock( + self.chain_spec.as_ref(), + FlashblockAssemblerInput { + base: &args.base, + state_root, + transactions_root, + receipts_root, + logs_bloom, + gas_used: prefix_gas_used, + blob_gas_used: prefix_blob_gas_used, + bundle_state: &output.state, + state_provider: state_provider.as_ref(), + transactions: body_transactions, + }, + )?; + let block: N::Block = block.into(); + let block = RecoveredBlock::new_unhashed(block, senders); + + if let Some(valid_block_tx) = valid_block_tx { + let _ = valid_block_tx.send(()); + } + let executed_block = self.spawn_deferred_trie_task( + block, + output, + hashed_state, + trie_output, + overlay_data, + overlay_factory, + ); + + // Update `PayloadProcessor`'s execution cache for next block's prewarming + self.payload_processor.on_inserted_executed_block( + executed_block.recovered_block.block_with_parent(), + &executed_block.execution_output.state, + ); + + self.commit_pending_sequence( + args.base, + executed_block, + PrefixExecutionMeta { + cached_reads, + cached_tx_count: block_transaction_count, + gas_used: prefix_gas_used, + blob_gas_used: prefix_blob_gas_used, + last_flashblock_index: args.last_flashblock_index, + }, + block_transaction_count, + )?; + + Ok(()) + } + + /// Builds a [`PendingSequence`] from an [`ExecutionOutcome`] and commits it to the + /// flashblocks state cache. + fn commit_pending_sequence( + &self, + base: OpFlashblockPayloadBase, + executed_block: ExecutedBlock, + prefix_execution_meta: PrefixExecutionMeta, + transaction_count: usize, + ) -> eyre::Result<()> { + let block_hash = executed_block.recovered_block.hash(); + let parent_hash = base.parent_hash; + + // Build tx index + let mut tx_index = HashMap::with_capacity(transaction_count); + + for (idx, tx) in executed_block.recovered_block.transactions_recovered().enumerate() { + tx_index.insert( + *tx.tx_hash(), + CachedTxInfo { + block_number: base.block_number, + block_hash, + tx_index: idx as u64, + tx: tx.into_inner().clone(), + receipt: executed_block.execution_output.result.receipts[idx].clone(), + }, + ); + } + self.flashblocks_state.handle_pending_sequence(PendingSequence { + pending: PendingBlock::with_executed_block( + Instant::now() + Duration::from_secs(1), + executed_block, + ), + prefix_execution_meta, + tx_index, + block_hash, + parent_hash, + }) + } + + fn prevalidate_incoming_sequence( + &self, + incoming_block_number: u64, + incoming_index: u64, + ) -> eyre::Result>> { + if let Some(pending) = self.flashblocks_state.get_pending_sequence() { + // Validate incoming height continuity + let pending_height = pending.get_height(); + if pending_height != incoming_block_number + && pending_height + 1 != incoming_block_number + { + return Err(eyre::eyre!( + "height mismatch: incoming={incoming_block_number}, pending={pending_height}" + )); + } + let incremental = pending_height == incoming_block_number; + if incremental { + // Validate states of last executed flashblock index + let last_index = pending.prefix_execution_meta.last_flashblock_index; + if last_index.saturating_add(1) != incoming_index { + return Err(eyre::eyre!( + "flashblock index mismatch: incoming={incoming_index}, pending={last_index}" + )); + } + return Ok(Some(pending)); + } else if incoming_index != 0 { + // Optimistic fresh build. Validate that build is starting from index = 0. + return Err(eyre::eyre!( + "flashblock index mismatch: should start from index 0 but incoming={incoming_index}" + )); + } + return Ok(None); + } + // No pending sequence initialized yet. Validate with canonical chainstate height + let canon_height = self.flashblocks_state.get_canon_height(); + if incoming_block_number > canon_height + 1 { + return Err(eyre::eyre!( + "height mismatch: incoming={incoming_block_number}, canonical={canon_height}" + )); + } + return Ok(None); + } + + /// Executes a block with the given state provider. + /// + /// This method orchestrates block execution: + /// 1. Sets up the EVM with state database and precompile caching + /// 2. Spawns a background task for incremental receipt root computation + /// 3. Executes transactions with metrics collection via state hooks + /// 4. Merges state transitions and records execution metrics + #[expect(clippy::type_complexity)] + fn execute_block( + &mut self, + state_provider: &dyn StateProvider, + execution_env: ExecutionEnv, + parent_header: &SealedHeaderFor, + attrs: EvmConfig::NextBlockEnvCtx, + transactions: Vec>>, + pending_sequence: Option>, + handle: &mut PayloadHandle, + ) -> eyre::Result<( + BlockExecutionOutput, + Vec
, + tokio::sync::oneshot::Receiver<(B256, alloy_primitives::Bloom)>, + CachedReads, + )> + where + T: ExecutableTxFor + ExecutableTxParts, N::SignedTx>, + Err: core::error::Error + Send + Sync + 'static, + EvmConfig: ConfigureEvm + Unpin> + + 'static, + { + // Build state + let mut read_cache = pending_sequence + .as_ref() + .map(|p| p.prefix_execution_meta.cached_reads.clone()) + .unwrap_or_default(); + let cached_db = read_cache.as_db_mut(StateProviderDatabase::new(state_provider)); + let mut state_builder = State::builder().with_database(cached_db).with_bundle_update(); + if let Some(seq) = pending_sequence.as_ref() { + state_builder = state_builder + .with_bundle_prestate(seq.pending.executed_block.execution_output.state.clone()); + } + let mut db = state_builder.build(); + + // For incremental builds, the only pre-execution effect we need is set_state_clear_flag, + // which configures EVM empty-account handling (OP Stack chains activate Spurious Dragon + // at genesis, so this is always true). + if pending_sequence.is_some() { + db.set_state_clear_flag(true); + } + + let evm = self.evm_config.evm_with_env(&mut db, execution_env.evm_env); + let execution_ctx = self + .evm_config + .context_for_next_block(parent_header, attrs) + .map_err(RethError::other)?; + let executor = self.evm_config.create_executor(evm, execution_ctx.clone()); + // Release the lifetime tie to &mut db so subsequent mutable borrows of db are allowed. + drop(execution_ctx); + + // Spawn background task to compute receipt root and logs bloom incrementally. + // Unbounded channel is used since tx count bounds capacity anyway (max ~30k txs per block). + let receipts_len = transactions.len(); + let (receipt_tx, receipt_rx) = crossbeam_channel::unbounded(); + let (result_tx, result_rx) = tokio::sync::oneshot::channel(); + let task_handle = ReceiptRootTaskHandle::new(receipt_rx, result_tx); + self.payload_processor.executor().spawn_blocking(move || task_handle.run(receipts_len)); + + let transaction_count = transactions.len(); + let mut executor = executor.with_state_hook(Some(Box::new(handle.state_hook()))); + + // Apply pre-execution changes for fresh builds + if pending_sequence.is_none() { + executor.apply_pre_execution_changes()?; + } + + // Execute all transactions and finalize + let (executor, senders) = self.execute_transactions( + executor, + pending_sequence.as_ref(), + transaction_count, + handle, + &receipt_tx, + )?; + drop(receipt_tx); + + // Finish execution and get the result + let (_evm, mut result) = executor.finish().map(|(evm, result)| (evm.into_db(), result))?; + if let Some(seq) = pending_sequence.as_ref() { + result = Self::merge_suffix_results( + &seq.prefix_execution_meta, + seq.pending.receipts.as_ref().clone(), + result, + ); + } + // Merge transitions into bundle state + db.merge_transitions(BundleRetention::Reverts); + + // Explicitly drop db to release the mutable borrow on read_cache held via cached_db, + // allowing read_cache to be moved into the return value. + let bundle = db.take_bundle(); + drop(db); + + let output = BlockExecutionOutput { result, state: bundle }; + debug!(target: "flashblocks::validator", "Executed block"); + + Ok((output, senders, result_rx, read_cache)) + } + + fn execute_transactions( + &self, + mut executor: Executor, + pending_sequence: Option<&PendingSequence>, + transaction_count: usize, + handle: &mut PayloadHandle, + receipt_tx: &crossbeam_channel::Sender>, + ) -> eyre::Result<(Executor, Vec
), BlockExecutionError> + where + T: ExecutableTxFor + + ExecutableTxParts< + <::Evm as Evm>::Tx, + ::Transaction, + >, + Executor: BlockExecutor, + Err: core::error::Error + Send + Sync + 'static, + EvmConfig: ConfigureEvm + Unpin> + + 'static, + { + // Send all previously executed receipts to the receipt root task for incremental builds + if let Some(seq) = pending_sequence { + for (index, receipt) in seq.pending.receipts.iter().enumerate() { + let _ = receipt_tx.send(IndexedReceipt::new(index, receipt.clone())); + } + } + + let mut senders = Vec::with_capacity(transaction_count); + let mut transactions = handle.iter_transactions().into_iter(); + + // Some executors may execute transactions that do not append receipts during the + // main loop (e.g., system transactions whose receipts are added during finalization). + // In that case, invoking the callback on every transaction would resend the previous + // receipt with the same index and can panic the ordered root builder. + let mut last_sent_len = 0usize; + loop { + let Some(tx_result) = transactions.next() else { break }; + + let tx = tx_result.map_err(BlockExecutionError::other)?; + let tx_signer = *tx.signer(); + senders.push(tx_signer); + + trace!(target: "flashblocks::validator", "Executing transaction"); + executor.execute_transaction(tx)?; + + let current_len = executor.receipts().len(); + if current_len > last_sent_len { + last_sent_len = current_len; + // Send the latest receipt to the background task for incremental root computation. + if let Some(receipt) = executor.receipts().last() { + let tx_index = current_len - 1; + let _ = receipt_tx.send(IndexedReceipt::new(tx_index, receipt.clone())); + } + } + } + Ok((executor, senders)) + } + + /// Determines the state root computation strategy based on configuration. + /// + /// Note: Use state root task only if prefix sets are empty, otherwise proof generation is + /// too expensive because it requires walking all paths in every proof. + const fn plan_state_root_computation(&self) -> StateRootStrategy { + if self.tree_config.state_root_fallback() { + StateRootStrategy::Synchronous + } else if self.tree_config.use_state_root_task() { + StateRootStrategy::StateRootTask + } else { + StateRootStrategy::Parallel + } + } + + fn spawn_payload_processor( + &mut self, + env: ExecutionEnv, + txs: Vec>>, + provider_builder: StateProviderBuilder, + overlay_factory: OverlayStateProviderFactory, + strategy: StateRootStrategy, + bal: Option>, + ) -> eyre::Result< + PayloadHandle< + impl ExecutableTxFor + use, + impl core::error::Error + Send + Sync + 'static + use, + N::Receipt, + >, + > { + let tx_iter = Self::flashblock_tx_iterator(txs); + match strategy { + StateRootStrategy::StateRootTask => { + // Use the pre-computed overlay factory for multiproofs + Ok(self.payload_processor.spawn( + env, + tx_iter, + provider_builder, + overlay_factory, + &self.tree_config, + bal, + )) + } + StateRootStrategy::Parallel | StateRootStrategy::Synchronous => Ok(self + .payload_processor + .spawn_cache_exclusive(env, tx_iter, provider_builder, bal)), + } + } + + /// Awaits the state root from the background task, with an optional timeout fallback. + /// + /// If a timeout is configured (`state_root_task_timeout`), this method first waits for the + /// state root task up to the timeout duration. If the task doesn't complete in time, a + /// sequential state root computation is spawned via `spawn_blocking`. Both computations + /// then race: the main thread polls the task receiver and the sequential result channel + /// in a loop, returning whichever finishes first. + /// + /// If no timeout is configured, this simply awaits the state root task without any fallback. + /// + /// Returns `ProviderResult>` where the outer `ProviderResult` captures + /// unrecoverable errors from the sequential fallback (e.g. DB errors), while the inner + /// `Result` captures parallel state root task errors that can still fall back to serial. + fn await_state_root_with_timeout( + &self, + handle: &mut PayloadHandle, + overlay_factory: OverlayStateProviderFactory, + hashed_state: &HashedPostState, + ) -> eyre::Result> { + let Some(timeout) = self.tree_config.state_root_task_timeout() else { + return Ok(handle.state_root()); + }; + + let task_rx = handle.take_state_root_rx(); + + match task_rx.recv_timeout(timeout) { + Ok(result) => Ok(result), + Err(RecvTimeoutError::Disconnected) => { + Ok(Err(ParallelStateRootError::Other("sparse trie task dropped".to_string()))) + } + Err(RecvTimeoutError::Timeout) => { + warn!( + target: "flashblocks::validator", + ?timeout, + "State root task timed out, spawning sequential fallback" + ); + + let (seq_tx, seq_rx) = + std::sync::mpsc::channel::>(); + + let seq_overlay = overlay_factory; + let seq_hashed_state = hashed_state.clone(); + self.payload_processor.executor().spawn_blocking(move || { + let result = Self::compute_state_root_serial(seq_overlay, &seq_hashed_state); + let _ = seq_tx.send(result); + }); + + const POLL_INTERVAL: std::time::Duration = std::time::Duration::from_millis(10); + + loop { + match task_rx.recv_timeout(POLL_INTERVAL) { + Ok(result) => { + debug!( + target: "flashblocks::validator", + source = "task", + "State root timeout race won" + ); + return Ok(result); + } + Err(RecvTimeoutError::Disconnected) => { + debug!( + target: "flashblocks::validator", + "State root task dropped, waiting for sequential fallback" + ); + let result = seq_rx.recv().map_err(|_| { + eyre::eyre!(std::io::Error::other( + "both state root computations failed", + )) + })?; + let (state_root, trie_updates) = result?; + return Ok(Ok(StateRootComputeOutcome { state_root, trie_updates })); + } + Err(RecvTimeoutError::Timeout) => {} + } + + if let Ok(result) = seq_rx.try_recv() { + debug!( + target: "flashblocks::validator", + source = "sequential", + "State root timeout race won" + ); + let (state_root, trie_updates) = result?; + return Ok(Ok(StateRootComputeOutcome { state_root, trie_updates })); + } + } + } + } + } + + /// Compute state root for the given hashed post state in parallel. + /// + /// Uses an overlay factory which provides the state of the parent block, along with the + /// [`HashedPostState`] containing the changes of this block, to compute the state root and + /// trie updates for this block. + /// + /// # Returns + /// + /// Returns `Ok(_)` if computed successfully. + /// Returns `Err(_)` if error was encountered during computation. + fn compute_state_root_parallel( + &self, + overlay_factory: OverlayStateProviderFactory, + hashed_state: &HashedPostState, + ) -> eyre::Result<(B256, TrieUpdates), ParallelStateRootError> { + // The `hashed_state` argument will be taken into account as part of the overlay, but we + // need to use the prefix sets which were generated from it to indicate to the + // ParallelStateRoot which parts of the trie need to be recomputed. + let prefix_sets = hashed_state.construct_prefix_sets().freeze(); + let overlay_factory = + overlay_factory.with_extended_hashed_state_overlay(hashed_state.clone_into_sorted()); + ParallelStateRoot::new(overlay_factory, prefix_sets, self.runtime.clone()) + .incremental_root_with_updates() + } + + /// Compute state root for the given hashed post state in serial. + /// + /// Uses an overlay factory which provides the state of the parent block, along with the + /// [`HashedPostState`] containing the changes of this block, to compute the state root and + /// trie updates for this block. + fn compute_state_root_serial( + overlay_factory: OverlayStateProviderFactory, + hashed_state: &HashedPostState, + ) -> eyre::Result<(B256, TrieUpdates)> { + // The `hashed_state` argument will be taken into account as part of the overlay, but we + // need to use the prefix sets which were generated from it to indicate to the + // StateRoot which parts of the trie need to be recomputed. + let prefix_sets = hashed_state.construct_prefix_sets().freeze(); + let overlay_factory = + overlay_factory.with_extended_hashed_state_overlay(hashed_state.clone_into_sorted()); + + let provider = overlay_factory.database_provider_ro()?; + + Ok(StateRoot::new(&provider, &provider) + .with_prefix_sets(prefix_sets) + .root_with_updates()?) + } + + fn merge_suffix_results( + cached_prefix: &PrefixExecutionMeta, + cached_receipts: Vec, + mut suffix_result: BlockExecutionResult, + ) -> BlockExecutionResult { + N::Receipt::add_cumulative_gas_offset(&mut suffix_result.receipts, cached_prefix.gas_used); + + let mut receipts = cached_receipts; + receipts.extend(suffix_result.receipts); + + // Use only suffix requests: the suffix executor's finish() produces + // post-execution requests from the complete block state (cached prestate + + // suffix changes). The cached prefix requests came from an intermediate + // state and must not be merged. + let requests = suffix_result.requests; + BlockExecutionResult { + receipts, + requests, + gas_used: cached_prefix.gas_used.saturating_add(suffix_result.gas_used), + blob_gas_used: cached_prefix.blob_gas_used.saturating_add(suffix_result.blob_gas_used), + } + } + + fn state_provider_builder( + &self, + hash: B256, + ) -> eyre::Result<( + StateProviderBuilder, + SealedHeaderFor, + Option<(Vec>, B256)>, + )> { + // Get overlay data (executed blocks + parent header) from flashblocks cache + if let Some((overlay_blocks, header, canon_hash)) = + self.flashblocks_state.get_overlay_data(&hash) + { + debug!( + target: "flashblocks::validator", + %hash, + "found state for block in flashblocks cache, creating provider builder"); + return Ok(( + StateProviderBuilder::new( + self.provider.clone(), + canon_hash, + Some(overlay_blocks.clone()), + ), + header, + Some((overlay_blocks, canon_hash)), + )); + } + // Check if block is persisted + if let Some(header) = self.provider.sealed_header_by_hash(hash)? { + debug!( + target: "flashblocks::validator", + %hash, + "found state for block in database, creating provider builder"); + return Ok(( + StateProviderBuilder::new(self.provider.clone(), hash, None), + header, + None, + )); + } + Err(eyre::eyre!("no state found for block {hash}")) + } + + /// Creates a [`LazyOverlay`] for the parent block without blocking. + /// + /// Returns a lazy overlay that will compute the trie input on first access, and the anchor + /// block hash (the highest persisted ancestor). This allows execution to start immediately + /// while the trie input computation is deferred until the overlay is actually needed. + /// + /// If parent is on disk (no in-memory blocks), returns `None` for the lazy overlay. + /// + /// Uses a cached overlay if available for the canonical head (the common case). + fn get_parent_lazy_overlay(&self, parent_hash: B256) -> (Option, B256) { + // Get blocks leading to the parent to determine the anchor + let (blocks, anchor_hash) = self + .flashblocks_state + .get_overlay_data(&parent_hash) + .map(|(blocks, _, anchor_hash)| (blocks, anchor_hash)) + .unwrap_or_else(|| (vec![], B256::ZERO)); + + if blocks.is_empty() { + debug!(target: "flashblocks::validator", "Parent found on disk, no lazy overlay needed"); + return (None, anchor_hash); + } + + // Extract deferred trie data handles (non-blocking) + debug!( + target: "flashblocks::validator", + %anchor_hash, + num_blocks = blocks.len(), + "Creating lazy overlay for flashblock state cache in-memory blocks" + ); + let handles: Vec = blocks.iter().map(|b| b.trie_data_handle()).collect(); + (Some(LazyOverlay::new(anchor_hash, handles)), anchor_hash) + } + + /// Spawns a background task to compute and sort trie data for the executed block. + /// + /// This function creates a [`DeferredTrieData`] handle with fallback inputs and spawns a + /// blocking task that calls `wait_cloned()` to: + /// 1. Sort the block's hashed state and trie updates + /// 2. Merge ancestor overlays and extend with the sorted data + /// 3. Create an [`AnchoredTrieInput`](reth_chain_state::AnchoredTrieInput) for efficient future + /// trie computations + /// 4. Cache the result so subsequent calls return immediately + /// + /// If the background task hasn't completed when `trie_data()` is called, `wait_cloned()` + /// computes from the stored inputs, eliminating deadlock risk and duplicate computation. + /// + /// The validation hot path can return immediately after state root verification, + /// while consumers (DB writes, overlay providers, proofs) get trie data either + /// from the completed task or via fallback computation. + fn spawn_deferred_trie_task( + &self, + block: RecoveredBlock, + execution_outcome: Arc>, + hashed_state: HashedPostState, + trie_output: TrieUpdates, + overlay_data: Option<(Vec>, B256)>, + overlay_factory: OverlayStateProviderFactory, + ) -> ExecutedBlock { + // Capture parent hash and ancestor overlays for deferred trie input construction. + let (overlay_blocks, anchor_hash) = + overlay_data.unwrap_or_else(|| (Vec::new(), block.parent_hash())); + + // Collect lightweight ancestor trie data handles. We don't call trie_data() here; + // the merge and any fallback sorting happens in the compute_trie_input_task. + let ancestors: Vec = + overlay_blocks.iter().rev().map(|b| b.trie_data_handle()).collect(); + + // Create deferred handle with fallback inputs in case the background task hasn't completed. + let deferred_trie_data = DeferredTrieData::pending( + Arc::new(hashed_state), + Arc::new(trie_output), + anchor_hash, + ancestors, + ); + let deferred_handle_task = deferred_trie_data.clone(); + + // Capture block info and cache handle for changeset computation + let block_hash = block.hash(); + let block_number = block.number(); + let changeset_cache = self.flashblocks_state.get_changeset_cache(); + + // Spawn background task to compute trie data. Calling `wait_cloned` will compute from + // the stored inputs and cache the result, so subsequent calls return immediately. + let compute_trie_input_task = move || { + debug!( + target: "flashblocks::changeset", + ?block_number, + "compute_trie_input_task", + ); + + let result = panic::catch_unwind(AssertUnwindSafe(|| { + let computed = deferred_handle_task.wait_cloned(); + // Compute and cache changesets using the computed trie_updates + // Get a provider from the overlay factory for trie cursor access + let changeset_start = Instant::now(); + let changeset_result = + overlay_factory.database_provider_ro().and_then(|provider| { + reth_trie::changesets::compute_trie_changesets( + &provider, + &computed.trie_updates, + ) + .map_err(ProviderError::Database) + }); + + match changeset_result { + Ok(changesets) => { + debug!( + target: "flashblocks::changeset", + ?block_number, + elapsed = ?changeset_start.elapsed(), + "Computed and caching changesets" + ); + changeset_cache.insert(block_hash, block_number, Arc::new(changesets)); + } + Err(e) => { + warn!( + target: "flashblocks::changeset", + ?block_number, + ?e, + "Failed to compute changesets in deferred trie task" + ); + } + } + })); + + if result.is_err() { + error!( + target: "flashblocks::validator", + "Deferred trie task panicked; fallback computation will be used when trie data is accessed" + ); + } + }; + + // Spawn task that computes trie data asynchronously. + self.payload_processor.executor().spawn_blocking(compute_trie_input_task); + + ExecutedBlock::with_deferred_trie_data( + Arc::new(block), + execution_outcome, + deferred_trie_data, + ) + } + + #[allow(clippy::type_complexity)] + fn flashblock_tx_iterator( + transactions: Vec>>, + ) -> ( + Vec>>, + fn(WithEncoded>) -> Result, Infallible>, + ) { + (transactions, |tx| Ok(tx.1)) + } +} diff --git a/crates/flashblocks/src/execution/worker.rs b/crates/flashblocks/src/execution/worker.rs deleted file mode 100644 index 4d0f9154..00000000 --- a/crates/flashblocks/src/execution/worker.rs +++ /dev/null @@ -1,678 +0,0 @@ -use crate::{ - cache::{FlashblockStateCache, PendingSequence}, - execution::{CachedExecutionMeta, TransactionCache}, -}; -use std::{ - sync::Arc, - time::{Duration, Instant}, -}; -use tokio_util::sync::CancellationToken; -use tracing::trace; - -use alloy_eips::{eip2718::WithEncoded, BlockNumberOrTag}; -use alloy_primitives::B256; -use op_alloy_rpc_types_engine::OpFlashblockPayloadBase; - -use reth_chain_state::{ComputedTrieData, ExecutedBlock}; -use reth_errors::RethError; -use reth_evm::{ - execute::{ - BlockAssembler, BlockAssemblerInput, BlockBuilder, BlockBuilderOutcome, BlockExecutor, - }, - ConfigureEvm, Evm, -}; -use reth_execution_types::{BlockExecutionOutput, BlockExecutionResult}; -use reth_optimism_primitives::OpReceipt; -use reth_primitives_traits::{ - transaction::TxHashRef, AlloyBlockHeader, BlockTy, HeaderTy, NodePrimitives, ReceiptTy, - Recovered, RecoveredBlock, SealedHeader, -}; -use reth_revm::{ - cached::CachedReads, - database::StateProviderDatabase, - db::{states::bundle_state::BundleRetention, BundleState, State}, -}; -use reth_rpc_eth_types::{EthApiError, PendingBlock}; -use reth_storage_api::{ - noop::NoopProvider, BlockReaderIdExt, HashedPostStateProvider, StateProviderFactory, - StateRootProvider, -}; - -pub(crate) struct BuildArgs { - pub(crate) base: OpFlashblockPayloadBase, - pub(crate) transactions: I, - pub(crate) cached_state: Option<(B256, CachedReads)>, - pub(crate) last_flashblock_index: u64, - pub(crate) cancel: CancellationToken, -} - -/// The `FlashblocksValidator` builds [`PendingBlock`] out of a sequence of transactions. -/// -/// Owns a [`TransactionCache`] for incremental prefix caching between flashblock builds. -#[derive(Debug)] -pub(crate) struct FlashblocksValidator { - /// The EVM configuration used to build the flashblocks. - evm_config: EvmConfig, - /// The transaction execution cache for incremental executions. - tx_cache: TransactionCache, - /// The state cache containing the canonical chainstate provider and the flashblocks - /// state cache layer. - state_cache: FlashblockStateCache, -} - -impl FlashblocksValidator { - pub(crate) fn new( - evm_config: EvmConfig, - state_cache: FlashblockStateCache, - ) -> Self { - Self { evm_config, state_cache, tx_cache: TransactionCache::new() } - } - - pub(crate) const fn provider(&self) -> &Provider { - &self.provider - } - - /// Clears the transaction cache (used on reorg/catch-up). - pub(crate) fn clear_cache(&mut self) { - self.tx_cache.clear(); - } -} - -/// Cached prefix execution data used to resume canonical builds. -#[derive(Debug, Clone)] -struct CachedPrefixExecutionResult { - /// Number of leading transactions covered by cached execution. - cached_tx_count: usize, - /// Cumulative bundle state after executing the cached prefix. - bundle: BundleState, - /// Cached receipts for the prefix. - receipts: Vec, - /// Total gas used by the cached prefix. - gas_used: u64, - /// Total blob/DA gas used by the cached prefix. - blob_gas_used: u64, -} - -/// Receipt requirements for cache-resume flow. -pub trait FlashblockCachedReceipt: Clone { - /// Adds `gas_offset` to each receipt's `cumulative_gas_used`. - fn add_cumulative_gas_offset(receipts: &mut [Self], gas_offset: u64); -} - -impl FlashblockCachedReceipt for OpReceipt { - fn add_cumulative_gas_offset(receipts: &mut [Self], gas_offset: u64) { - if gas_offset == 0 { - return; - } - - for receipt in receipts { - let inner = receipt.as_receipt_mut(); - inner.cumulative_gas_used = inner.cumulative_gas_used.saturating_add(gas_offset); - } - } -} - -impl FlashBlockBuilder -where - N: NodePrimitives, - N::Receipt: FlashblockCachedReceipt, - EvmConfig: ConfigureEvm + Unpin>, - Provider: StateProviderFactory - + BlockReaderIdExt< - Header = HeaderTy, - Block = BlockTy, - Transaction = N::SignedTx, - Receipt = ReceiptTy, - > + Unpin, -{ - /// Returns the [`PendingSequence`], which contains the full built execution state of - /// the flashblocks sequence passed in `BuildArgs`. - /// - /// The - /// - /// In canonical mode, the internal transaction cache is used to resume from - /// cached state if the transaction list is a continuation of what was previously - /// executed. - /// - /// Returns `None` if: - /// - In canonical mode: flashblock doesn't attach to the latest header - /// - In speculative mode: no pending parent state provided - pub(crate) fn execute>>>( - &mut self, - mut args: BuildArgs, - ) -> eyre::Result> { - trace!(target: "flashblocks", "Attempting new pending block from flashblocks"); - - let parent_hash = args.base.parent_hash; - let parent_header = self.state_cache.latest_header(parent_hash)?; - let state_provider = self.state_cache.history_by_block_hash(parent_header.hash())?; - - let latest = self - .provider - .latest_header()? - .ok_or(EthApiError::HeaderNotFound(BlockNumberOrTag::Latest.into()))?; - let latest_hash = latest.hash(); - - // Determine build mode: canonical (parent is local tip) or speculative (parent is pending) - let is_canonical = args.base.parent_hash == latest_hash; - let has_pending_parent = args.pending_parent.is_some(); - - if !is_canonical && !has_pending_parent { - trace!( - target: "flashblocks", - flashblock_parent = ?args.base.parent_hash, - local_latest = ?latest.num_hash(), - "Skipping non-consecutive flashblock (no pending parent available)" - ); - return Ok(None); - } - - // Collect transactions and extract hashes for cache lookup - let transactions: Vec<_> = args.transactions.into_iter().collect(); - let tx_hashes: Vec = transactions.iter().map(|tx| *tx.tx_hash()).collect(); - - // Get state provider and parent header context. - // For speculative builds, use the canonical anchor hash (not the pending parent hash) - // for storage reads, but execute with the pending parent's sealed header context. - let (state_provider, canonical_anchor, parent_header) = if is_canonical { - (self.provider.history_by_block_hash(latest.hash())?, latest.hash(), &latest) - } else { - // For speculative building, we need to use the canonical anchor - // and apply the pending state's bundle on top of it - let pending = args.pending_parent.as_ref().unwrap(); - let Some(parent_header) = pending.sealed_header.as_ref() else { - trace!( - target: "flashblocks", - pending_block_number = pending.block_number, - pending_block_hash = ?pending.block_hash, - "Skipping speculative build: pending parent header is unavailable" - ); - return Ok(None); - }; - if !is_consistent_speculative_parent_hashes( - args.base.parent_hash, - pending.block_hash, - parent_header.hash(), - ) { - trace!( - target: "flashblocks", - incoming_parent_hash = ?args.base.parent_hash, - pending_block_hash = ?pending.block_hash, - pending_sealed_hash = ?parent_header.hash(), - pending_block_number = pending.block_number, - "Skipping speculative build: inconsistent pending parent hashes" - ); - return Ok(None); - } - trace!( - target: "flashblocks", - pending_block_number = pending.block_number, - pending_block_hash = ?pending.block_hash, - canonical_anchor = ?pending.canonical_anchor_hash, - "Building speculatively on pending state" - ); - ( - self.provider.history_by_block_hash(pending.canonical_anchor_hash)?, - pending.canonical_anchor_hash, - parent_header, - ) - }; - - // Set up cached reads - let cache_key = if is_canonical { latest_hash } else { args.base.parent_hash }; - let mut request_cache = args - .cached_state - .take() - .filter(|(hash, _)| hash == &cache_key) - .map(|(_, state)| state) - .unwrap_or_else(|| { - // For speculative builds, use cached reads from pending parent - args.pending_parent.as_ref().map(|p| p.cached_reads.clone()).unwrap_or_default() - }); - - let cached_db = request_cache.as_db_mut(StateProviderDatabase::new(&state_provider)); - - // Check for resumable canonical execution state. - let canonical_parent_hash = args.base.parent_hash; - let cached_prefix = if is_canonical { - self.tx_cache - .get_resumable_state_with_execution_meta_for_parent( - args.base.block_number, - canonical_parent_hash, - &tx_hashes, - ) - .map(|(bundle, receipts, _requests, gas_used, blob_gas_used, cached_tx_count)| { - trace!( - target: "flashblocks", - cached_tx_count, - total_txs = tx_hashes.len(), - "Cache hit (executing only uncached suffix)" - ); - CachedPrefixExecutionResult { - cached_tx_count, - bundle: bundle.clone(), - receipts: receipts.to_vec(), - gas_used, - blob_gas_used, - } - }) - } else { - None - }; - - // Build state with appropriate prestate - // - Speculative builds use pending parent prestate - // - Canonical cache-hit builds use cached prefix prestate - let mut state = if let Some(ref pending) = args.pending_parent { - State::builder() - .with_database(cached_db) - .with_bundle_prestate(pending.execution_outcome.state.clone()) - .with_bundle_update() - .build() - } else if let Some(ref cached_prefix) = cached_prefix { - State::builder() - .with_database(cached_db) - .with_bundle_prestate(cached_prefix.bundle.clone()) - .with_bundle_update() - .build() - } else { - State::builder().with_database(cached_db).with_bundle_update().build() - }; - - let (execution_result, block, hashed_state, bundle) = if let Some(cached_prefix) = - cached_prefix - { - // Cached prefix execution model: - // - The cached bundle prestate already includes pre-execution state changes - // (blockhash/beacon root updates, create2deployer), so we do NOT call - // apply_pre_execution_changes() again. - // - The only pre-execution effect we need is set_state_clear_flag, which configures EVM - // empty-account handling (OP Stack chains activate Spurious Dragon at genesis, so - // this is always true). - // - Suffix transactions execute against the warm prestate. - // - Post-execution (finish()) runs once on the suffix executor, producing correct - // results for the full block. For OP Stack post-merge, the - // post_block_balance_increments are empty (no block rewards, no ommers, no - // withdrawals passed), so finish() only seals execution state. - let attrs = args.base.clone().into(); - let evm_env = - self.evm_config.next_evm_env(parent_header, &attrs).map_err(RethError::other)?; - let execution_ctx = self - .evm_config - .context_for_next_block(parent_header, attrs) - .map_err(RethError::other)?; - - // The cached bundle prestate already includes pre-execution state changes. - // Only set the state clear flag (Spurious Dragon empty-account handling). - state.set_state_clear_flag(true); - let evm = self.evm_config.evm_with_env(&mut state, evm_env); - let mut executor = self.evm_config.create_executor(evm, execution_ctx.clone()); - - for tx in transactions.iter().skip(cached_prefix.cached_tx_count).cloned() { - let _gas_used = executor.execute_transaction(tx)?; - } - - let (evm, suffix_execution_result) = executor.finish()?; - let (db, evm_env) = evm.finish(); - db.merge_transitions(BundleRetention::Reverts); - - let execution_result = - Self::merge_cached_and_suffix_results(cached_prefix, suffix_execution_result); - - let (hashed_state, state_root) = if args.compute_state_root { - trace!(target: "flashblocks", "Computing block state root"); - let hashed_state = state_provider.hashed_post_state(&db.bundle_state); - let (state_root, _) = state_provider - .state_root_with_updates(hashed_state.clone()) - .map_err(RethError::other)?; - (hashed_state, state_root) - } else { - let noop_provider = NoopProvider::default(); - let hashed_state = noop_provider.hashed_post_state(&db.bundle_state); - let (state_root, _) = noop_provider - .state_root_with_updates(hashed_state.clone()) - .map_err(RethError::other)?; - (hashed_state, state_root) - }; - let bundle = db.take_bundle(); - - let (block_transactions, senders): (Vec<_>, Vec<_>) = - transactions.iter().map(|tx| tx.1.clone().into_parts()).unzip(); - let block = self - .evm_config - .block_assembler() - .assemble_block(BlockAssemblerInput::new( - evm_env, - execution_ctx, - parent_header, - block_transactions, - &execution_result, - &bundle, - &state_provider, - state_root, - )) - .map_err(RethError::other)?; - let block = RecoveredBlock::new_unhashed(block, senders); - - (execution_result, block, hashed_state, bundle) - } else { - let mut builder = self - .evm_config - .builder_for_next_block(&mut state, parent_header, args.base.clone().into()) - .map_err(RethError::other)?; - - builder.apply_pre_execution_changes()?; - - for tx in transactions { - let _gas_used = builder.execute_transaction(tx)?; - } - - let BlockBuilderOutcome { execution_result, block, hashed_state, .. } = - if args.compute_state_root { - trace!(target: "flashblocks", "Computing block state root"); - builder.finish(&state_provider)? - } else { - builder.finish(NoopProvider::default())? - }; - let bundle = state.take_bundle(); - - (execution_result, block, hashed_state, bundle) - }; - - // Update internal transaction cache (only in canonical mode) - if is_canonical { - self.tx_cache.update_with_execution_meta_for_parent( - args.base.block_number, - canonical_parent_hash, - tx_hashes, - bundle.clone(), - execution_result.receipts.clone(), - CachedExecutionMeta { - requests: execution_result.requests.clone(), - gas_used: execution_result.gas_used, - blob_gas_used: execution_result.blob_gas_used, - }, - ); - } - - let execution_outcome = BlockExecutionOutput { state: bundle, result: execution_result }; - let execution_outcome = Arc::new(execution_outcome); - - // Create pending state for subsequent builds. - // Use the locally built block hash for both parent matching and speculative - // execution context to avoid split-hash ambiguity. - let local_block_hash = block.hash(); - if local_block_hash != args.last_flashblock_hash { - trace!( - target: "flashblocks", - local_block_hash = ?local_block_hash, - sequencer_block_hash = ?args.last_flashblock_hash, - block_number = block.number(), - "Local block hash differs from sequencer-provided hash; speculative chaining will follow local hash" - ); - } - let sealed_header = SealedHeader::new(block.header().clone(), local_block_hash); - let pending_state = PendingBlockState::new( - local_block_hash, - block.number(), - args.base.parent_hash, - canonical_anchor, - execution_outcome.clone(), - request_cache.clone(), - ) - .with_sealed_header(sealed_header); - - let pending_block = PendingBlock::with_executed_block( - Instant::now() + Duration::from_secs(1), - ExecutedBlock::new( - block.into(), - execution_outcome, - ComputedTrieData::without_trie_input( - Arc::new(hashed_state.into_sorted()), - Arc::default(), - ), - ), - ); - let pending_flashblock = PendingFlashBlock::new( - pending_block, - canonical_anchor, - args.last_flashblock_index, - args.last_flashblock_hash, - args.compute_state_root, - ); - - Ok(Some(BuildResult { pending_flashblock, cached_reads: request_cache, pending_state })) - } - - fn merge_cached_and_suffix_results( - cached_prefix: CachedPrefixExecutionResult, - mut suffix_result: BlockExecutionResult, - ) -> BlockExecutionResult { - N::Receipt::add_cumulative_gas_offset(&mut suffix_result.receipts, cached_prefix.gas_used); - - let mut receipts = cached_prefix.receipts; - receipts.extend(suffix_result.receipts); - - // Use only suffix requests: the suffix executor's finish() produces - // post-execution requests from the complete block state (cached prestate + - // suffix changes). The cached prefix requests came from an intermediate - // state and must not be merged. - let requests = suffix_result.requests; - - BlockExecutionResult { - receipts, - requests, - gas_used: cached_prefix.gas_used.saturating_add(suffix_result.gas_used), - blob_gas_used: cached_prefix.blob_gas_used.saturating_add(suffix_result.blob_gas_used), - } - } -} - -#[inline] -fn is_consistent_speculative_parent_hashes( - incoming_parent_hash: B256, - pending_block_hash: B256, - pending_sealed_hash: B256, -) -> bool { - incoming_parent_hash == pending_block_hash && pending_block_hash == pending_sealed_hash -} - -#[cfg(test)] -mod tests { - use super::{is_consistent_speculative_parent_hashes, BuildArgs, FlashBlockBuilder}; - use crate::execution::cache::CachedExecutionMeta; - use alloy_consensus::{SignableTransaction, TxEip1559}; - use alloy_eips::eip2718::Encodable2718; - use alloy_network::TxSignerSync; - use alloy_primitives::{Address, StorageKey, StorageValue, TxKind, B256, U256}; - use alloy_signer_local::PrivateKeySigner; - use op_alloy_rpc_types_engine::OpFlashblockPayloadBase; - use op_revm::constants::L1_BLOCK_CONTRACT; - use reth_optimism_chainspec::OP_MAINNET; - use reth_optimism_evm::OpEvmConfig; - use reth_optimism_primitives::{OpBlock, OpPrimitives, OpTransactionSigned}; - use reth_primitives_traits::{AlloyBlockHeader, Recovered, SignerRecoverable}; - use reth_provider::test_utils::{ExtendedAccount, MockEthProvider}; - use reth_provider::ChainSpecProvider; - use reth_storage_api::BlockReaderIdExt; - use std::str::FromStr; - - fn signed_transfer_tx( - signer: &PrivateKeySigner, - nonce: u64, - recipient: Address, - ) -> OpTransactionSigned { - let mut tx = TxEip1559 { - chain_id: 10, // OP Mainnet chain id - nonce, - gas_limit: 100_000, - max_priority_fee_per_gas: 1_000_000_000, - max_fee_per_gas: 2_000_000_000, - to: TxKind::Call(recipient), - value: U256::from(1), - ..Default::default() - }; - let signature = signer.sign_transaction_sync(&mut tx).expect("signing tx succeeds"); - tx.into_signed(signature).into() - } - - fn into_encoded_recovered( - tx: OpTransactionSigned, - signer: Address, - ) -> alloy_eips::eip2718::WithEncoded> { - let encoded = tx.encoded_2718(); - Recovered::new_unchecked(tx, signer).into_encoded_with(encoded) - } - - #[test] - fn speculative_parent_hashes_must_all_match() { - let h = B256::repeat_byte(0x11); - assert!(is_consistent_speculative_parent_hashes(h, h, h)); - } - - #[test] - fn speculative_parent_hashes_reject_any_mismatch() { - let incoming = B256::repeat_byte(0x11); - let pending = B256::repeat_byte(0x22); - let sealed = B256::repeat_byte(0x33); - - assert!(!is_consistent_speculative_parent_hashes(incoming, pending, sealed)); - assert!(!is_consistent_speculative_parent_hashes(incoming, incoming, sealed)); - assert!(!is_consistent_speculative_parent_hashes(incoming, pending, pending)); - } - - #[test] - fn canonical_build_reuses_cached_prefix_execution() { - let provider = MockEthProvider::::new().with_chain_spec(OP_MAINNET.clone()); - let genesis_hash = provider.chain_spec().genesis_hash(); - let genesis_block = - OpBlock::new(provider.chain_spec().genesis_header().clone(), Default::default()); - provider.add_block(genesis_hash, genesis_block); - - let recipient = Address::repeat_byte(0x22); - let signer = PrivateKeySigner::random(); - let tx_a = signed_transfer_tx(&signer, 0, recipient); - let tx_b = signed_transfer_tx(&signer, 1, recipient); - let tx_c = signed_transfer_tx(&signer, 2, recipient); - let signer = tx_a.recover_signer().expect("tx signer recovery succeeds"); - - provider.add_account(signer, ExtendedAccount::new(0, U256::from(1_000_000_000_000_000u64))); - provider.add_account(recipient, ExtendedAccount::new(0, U256::ZERO)); - provider.add_account( - L1_BLOCK_CONTRACT, - ExtendedAccount::new(1, U256::ZERO).extend_storage([ - (StorageKey::with_last_byte(1), StorageValue::from(1_000_000_000u64)), - (StorageKey::with_last_byte(5), StorageValue::from(188u64)), - (StorageKey::with_last_byte(6), StorageValue::from(684_000u64)), - ( - StorageKey::with_last_byte(3), - StorageValue::from_str( - "0x0000000000000000000000000000000000001db0000d27300000000000000005", - ) - .expect("valid L1 fee scalar storage value"), - ), - ]), - ); - - let latest = provider - .latest_header() - .expect("provider latest header query succeeds") - .expect("genesis header exists"); - - let base = OpFlashblockPayloadBase { - parent_hash: latest.hash(), - parent_beacon_block_root: B256::ZERO, - fee_recipient: Address::ZERO, - prev_randao: B256::repeat_byte(0x55), - block_number: latest.number() + 1, - gas_limit: 30_000_000, - timestamp: latest.timestamp() + 2, - extra_data: Default::default(), - base_fee_per_gas: U256::from(1_000_000_000u64), - }; - let base_parent_hash = base.parent_hash; - - let tx_a_hash = B256::from(*tx_a.tx_hash()); - let tx_b_hash = B256::from(*tx_b.tx_hash()); - let tx_c_hash = B256::from(*tx_c.tx_hash()); - - let tx_a = into_encoded_recovered(tx_a, signer); - let tx_b = into_encoded_recovered(tx_b, signer); - let tx_c = into_encoded_recovered(tx_c, signer); - - let evm_config = OpEvmConfig::optimism(OP_MAINNET.clone()); - let mut builder = FlashBlockBuilder::::new(evm_config, provider); - - let first = builder - .execute(BuildArgs { - base: base.clone(), - transactions: vec![tx_a.clone(), tx_b.clone()], - cached_state: None, - last_flashblock_index: 0, - last_flashblock_hash: B256::repeat_byte(0xA0), - compute_state_root: false, - pending_parent: None, - }) - .expect("first build succeeds") - .expect("first build is canonical"); - - assert_eq!(first.pending_state.execution_outcome.result.receipts.len(), 2); - - let cached_hashes = vec![tx_a_hash, tx_b_hash]; - let (bundle, receipts, requests, gas_used, blob_gas_used, skip) = builder - .tx_cache - .get_resumable_state_with_execution_meta_for_parent( - base.block_number, - base_parent_hash, - &cached_hashes, - ) - .expect("cache should contain first build execution state"); - assert_eq!(skip, 2); - - let mut tampered_receipts = receipts.to_vec(); - tampered_receipts[0].as_receipt_mut().cumulative_gas_used = - tampered_receipts[0].as_receipt().cumulative_gas_used.saturating_add(17); - let expected_tampered_gas = tampered_receipts[0].as_receipt().cumulative_gas_used; - - builder.tx_cache.update_with_execution_meta_for_parent( - base.block_number, - base_parent_hash, - cached_hashes, - bundle.clone(), - tampered_receipts, - CachedExecutionMeta { requests: requests.clone(), gas_used, blob_gas_used }, - ); - - let second_hashes = vec![tx_a_hash, tx_b_hash, tx_c_hash]; - let (_, _, _, _, _, skip) = builder - .tx_cache - .get_resumable_state_with_execution_meta_for_parent( - base.block_number, - base_parent_hash, - &second_hashes, - ) - .expect("second tx list should extend cached prefix"); - assert_eq!(skip, 2); - - let second = builder - .execute(BuildArgs { - base, - transactions: vec![tx_a, tx_b, tx_c], - cached_state: None, - last_flashblock_index: 1, - last_flashblock_hash: B256::repeat_byte(0xA1), - compute_state_root: false, - pending_parent: None, - }) - .expect("second build succeeds") - .expect("second build is canonical"); - - let receipts = &second.pending_state.execution_outcome.result.receipts; - assert_eq!(receipts.len(), 3); - assert_eq!(receipts[0].as_receipt().cumulative_gas_used, expected_tampered_gas); - assert!( - receipts[2].as_receipt().cumulative_gas_used - > receipts[1].as_receipt().cumulative_gas_used - ); - } -} diff --git a/crates/flashblocks/src/lib.rs b/crates/flashblocks/src/lib.rs index e2189344..20bd2ec7 100644 --- a/crates/flashblocks/src/lib.rs +++ b/crates/flashblocks/src/lib.rs @@ -11,7 +11,6 @@ mod ws; mod test_utils; pub use cache::{CachedTxInfo, FlashblockStateCache, PendingSequence}; -pub use execution::FlashblockCachedReceipt; pub use service::FlashblocksRpcService; pub use subscription::FlashblocksPubSub; pub use ws::WsFlashBlockStream; diff --git a/crates/flashblocks/src/subscription/rpc.rs b/crates/flashblocks/src/subscription/rpc.rs index 362e8fbe..53e1955e 100644 --- a/crates/flashblocks/src/subscription/rpc.rs +++ b/crates/flashblocks/src/subscription/rpc.rs @@ -3,7 +3,7 @@ use crate::{ EnrichedTransaction, FlashblockParams, FlashblockStreamEvent, FlashblockSubscriptionKind, FlashblocksFilter, }, - FlashblockCachedReceipt, PendingSequence, PendingSequenceRx, + PendingSequence, PendingSequenceRx, }; use futures::StreamExt; @@ -74,10 +74,7 @@ pub trait FlashblocksPubSubApi { /// Optimism-specific Ethereum pubsub handler that extends standard subscriptions with flashblocks support. #[derive(Clone)] -pub struct FlashblocksPubSub -where - N::Receipt: FlashblockCachedReceipt, -{ +pub struct FlashblocksPubSub { /// Standard eth pubsub handler eth_pubsub: EthPubSub, /// All nested flashblocks fields bundled together @@ -89,7 +86,6 @@ where Eth: RpcNodeCore + 'static, Eth::Provider: BlockNumReader, Eth::RpcConvert: RpcConvert + Clone, - N::Receipt: FlashblockCachedReceipt, { /// Creates a new, shareable instance. /// @@ -166,7 +162,6 @@ where Eth: RpcNodeCore + 'static, Eth::Provider: BlockNumReader, Eth::RpcConvert: RpcConvert + Clone, - N::Receipt: FlashblockCachedReceipt, { async fn subscribe( &self, @@ -201,10 +196,7 @@ where } #[derive(Clone)] -pub struct FlashblocksPubSubInner -where - N::Receipt: FlashblockCachedReceipt, -{ +pub struct FlashblocksPubSubInner { /// Pending block receiver from flashblocks, if available pub(crate) pending_block_rx: PendingSequenceRx, /// The type that's used to spawn subscription tasks. @@ -219,7 +211,6 @@ impl FlashblocksPubSubInner where Eth: RpcNodeCore + 'static, Eth::RpcConvert: RpcConvert + Clone, - N::Receipt: FlashblockCachedReceipt, { fn new_flashblocks_stream( &self, @@ -497,10 +488,7 @@ where /// Extract `Header` from `PendingFlashBlock` fn extract_header_from_pending_block( pending_block: &PendingSequence, -) -> Result, ErrorObject<'static>> -where - N::Receipt: FlashblockCachedReceipt, -{ +) -> Result, ErrorObject<'static>> { let block = pending_block.block(); Ok(Header::from_consensus( block.clone_sealed_header().into(), diff --git a/crates/flashblocks/src/validation.rs b/crates/flashblocks/src/validation.rs deleted file mode 100644 index d5012eba..00000000 --- a/crates/flashblocks/src/validation.rs +++ /dev/null @@ -1,599 +0,0 @@ -//! Flashblock sequence validation and reorganization detection. -//! -//! Provides stateless validation logic for flashblock sequencing and chain reorg detection. -//! -//! This module contains three main components: -//! -//! 1. [`FlashblockSequenceValidator`] - Validates that incoming flashblocks follow the expected -//! sequence ordering (consecutive indices within a block, proper block transitions). -//! -//! 2. [`ReorgDetector`] - Detects chain reorganizations by comparing full block fingerprints (block -//! hash, parent hash, and transaction hashes) between tracked (pending) state and canonical -//! chain state. -//! -//! 3. [`CanonicalBlockReconciler`] - Determines the appropriate strategy for reconciling pending -//! flashblock state when new canonical blocks arrive. - -use alloy_primitives::B256; - -/// Result of validating a flashblock's position in the sequence. -#[derive(Debug, Clone, PartialEq, Eq)] -pub enum SequenceValidationResult { - /// Next consecutive flashblock within the current block (same block, index + 1). - NextInSequence, - /// First flashblock (index 0) of the next block (block + 1). - FirstOfNextBlock, - /// Duplicate flashblock (same block and index) - should be ignored. - Duplicate, - /// Non-sequential index within the same block - indicates missed flashblocks. - NonSequentialGap { - /// Expected flashblock index. - expected: u64, - /// Actual incoming flashblock index. - actual: u64, - }, - /// New block received with non-zero index - missed the base flashblock. - InvalidNewBlockIndex { - /// Block number of the incoming flashblock. - block_number: u64, - /// The invalid (non-zero) index received. - index: u64, - }, -} - -/// Stateless validator for flashblock sequence ordering. -/// -/// Flashblocks must arrive in strict sequential order: -/// - Within a block: indices must be consecutive (0, 1, 2, ...) -/// - Across blocks: new block must start with index 0 and be exactly `block_number + 1` -/// -/// # Example -/// -/// ``` -/// use xlayer_flashblocks::validation::{ -/// FlashblockSequenceValidator, SequenceValidationResult, -/// }; -/// -/// // Valid: next flashblock in sequence -/// let result = FlashblockSequenceValidator::validate(100, 2, 100, 3); -/// assert_eq!(result, SequenceValidationResult::NextInSequence); -/// -/// // Valid: first flashblock of next block -/// let result = FlashblockSequenceValidator::validate(100, 5, 101, 0); -/// assert_eq!(result, SequenceValidationResult::FirstOfNextBlock); -/// -/// // Invalid: gap in sequence -/// let result = FlashblockSequenceValidator::validate(100, 2, 100, 5); -/// assert!(matches!(result, SequenceValidationResult::NonSequentialGap { .. })); -/// ``` -#[derive(Debug, Clone, Copy, Default)] -pub struct FlashblockSequenceValidator; - -impl FlashblockSequenceValidator { - /// Validates whether an incoming flashblock follows the expected sequence. - /// - /// Returns the appropriate [`SequenceValidationResult`] based on: - /// - Same block, index + 1 → `NextInSequence` - /// - Next block, index 0 → `FirstOfNextBlock` - /// - Same block and index → `Duplicate` - /// - Same block, wrong index → `NonSequentialGap` - /// - Different block, non-zero index or block gap → `InvalidNewBlockIndex` - pub const fn validate( - latest_block_number: u64, - latest_flashblock_index: u64, - incoming_block_number: u64, - incoming_index: u64, - ) -> SequenceValidationResult { - // Next flashblock within the current block - if incoming_block_number == latest_block_number - && incoming_index == latest_flashblock_index + 1 - { - SequenceValidationResult::NextInSequence - // First flashblock of the next block - } else if incoming_block_number == latest_block_number + 1 && incoming_index == 0 { - SequenceValidationResult::FirstOfNextBlock - // New block with non-zero index or block gap - } else if incoming_block_number != latest_block_number { - SequenceValidationResult::InvalidNewBlockIndex { - block_number: incoming_block_number, - index: incoming_index, - } - } else if incoming_index == latest_flashblock_index { - // Duplicate flashblock - SequenceValidationResult::Duplicate - } else { - // Non-sequential index within the same block - SequenceValidationResult::NonSequentialGap { - expected: latest_flashblock_index + 1, - actual: incoming_index, - } - } - } -} - -/// Fingerprint for a tracked block (pending/cached sequence). -#[derive(Debug, Clone, PartialEq, Eq)] -pub struct TrackedBlockFingerprint { - /// Block number. - pub block_number: u64, - /// Block hash. - pub block_hash: B256, - /// Parent hash. - pub parent_hash: B256, - /// Ordered transaction hashes in the block. - pub tx_hashes: Vec, -} - -/// Fingerprint for a canonical block notification. -#[derive(Debug, Clone, PartialEq, Eq)] -pub struct CanonicalBlockFingerprint { - /// Block number. - pub block_number: u64, - /// Block hash. - pub block_hash: B256, - /// Parent hash. - pub parent_hash: B256, - /// Ordered transaction hashes in the block. - pub tx_hashes: Vec, -} - -/// Result of a reorganization detection check. -#[derive(Debug, Clone, PartialEq, Eq)] -pub enum ReorgDetectionResult { - /// Tracked and canonical fingerprints match exactly. - NoReorg, - /// Tracked and canonical fingerprints differ. - ReorgDetected, -} - -impl ReorgDetectionResult { - /// Returns `true` if a reorganization was detected. - #[inline] - pub const fn is_reorg(&self) -> bool { - matches!(self, Self::ReorgDetected) - } - - /// Returns `true` if no reorganization was detected. - #[inline] - pub const fn is_no_reorg(&self) -> bool { - matches!(self, Self::NoReorg) - } -} - -/// Detects chain reorganizations by comparing full block fingerprints. -/// -/// A reorg is detected when any fingerprint component differs: -/// - Block hash -/// - Parent hash -/// - Transaction hash list (including ordering) -/// -/// # Example -/// -/// ``` -/// use alloy_primitives::B256; -/// use xlayer_flashblocks::validation::{ -/// CanonicalBlockFingerprint, ReorgDetectionResult, ReorgDetector, TrackedBlockFingerprint, -/// }; -/// -/// let tracked = TrackedBlockFingerprint { -/// block_number: 100, -/// block_hash: B256::repeat_byte(0xAA), -/// parent_hash: B256::repeat_byte(0x11), -/// tx_hashes: vec![B256::repeat_byte(1), B256::repeat_byte(2)], -/// }; -/// let canonical = CanonicalBlockFingerprint { -/// block_number: 100, -/// block_hash: B256::repeat_byte(0xAA), -/// parent_hash: B256::repeat_byte(0x11), -/// tx_hashes: vec![B256::repeat_byte(1), B256::repeat_byte(2)], -/// }; -/// -/// let result = ReorgDetector::detect(&tracked, &canonical); -/// assert_eq!(result, ReorgDetectionResult::NoReorg); -/// ``` -#[derive(Debug, Clone, Copy, Default)] -pub struct ReorgDetector; - -impl ReorgDetector { - /// Compares tracked vs canonical block fingerprints to detect reorgs. - pub fn detect( - tracked: &TrackedBlockFingerprint, - canonical: &CanonicalBlockFingerprint, - ) -> ReorgDetectionResult { - if tracked.block_hash == canonical.block_hash - && tracked.parent_hash == canonical.parent_hash - && tracked.tx_hashes == canonical.tx_hashes - { - ReorgDetectionResult::NoReorg - } else { - ReorgDetectionResult::ReorgDetected - } - } -} - -/// Strategy for reconciling pending state with canonical state on new canonical blocks. -/// -/// When a new canonical block arrives, the system must decide how to update -/// the pending flashblock state. This enum represents the possible strategies. -#[derive(Debug, Clone, PartialEq, Eq)] -pub enum ReconciliationStrategy { - /// Canonical caught up or passed pending (canonical >= latest pending). Clear pending state. - CatchUp, - /// Reorg detected (tx mismatch). Rebuild pending from canonical. - HandleReorg, - /// Pending too far ahead of canonical. - DepthLimitExceeded { - /// Current depth of pending blocks. - depth: u64, - /// Configured maximum depth. - max_depth: u64, - }, - /// No issues - continue building on pending state. - Continue, - /// No pending state exists (startup or after clear). - NoPendingState, -} - -/// Determines reconciliation strategy for canonical block updates. -/// -/// This reconciler helps maintain consistency between pending flashblock state -/// and the canonical chain. It's used when new canonical blocks arrive to -/// determine whether to: -/// - Clear pending state (canonical caught up) -/// - Rebuild pending state (reorg detected) -/// - Continue as-is (pending still ahead and valid) -/// -/// # Priority Order -/// -/// The reconciler checks conditions in this order: -/// 1. `NoPendingState` - No pending state to reconcile -/// 2. `CatchUp` - Canonical has caught up to or passed pending -/// 3. `HandleReorg` - Reorg detected (takes precedence over depth limit) -/// 4. `DepthLimitExceeded` - Pending is too far ahead -/// 5. `Continue` - Everything is fine, keep building -/// -/// # Example -/// -/// ``` -/// use xlayer_flashblocks::validation::{CanonicalBlockReconciler, ReconciliationStrategy}; -/// -/// // Canonical caught up to pending -/// let strategy = CanonicalBlockReconciler::reconcile( -/// Some(100), // earliest pending -/// Some(105), // latest pending -/// 105, // canonical block number -/// 10, // max depth -/// false, // no reorg detected -/// ); -/// assert_eq!(strategy, ReconciliationStrategy::CatchUp); -/// ``` -#[derive(Debug, Clone, Copy, Default)] -pub struct CanonicalBlockReconciler; - -impl CanonicalBlockReconciler { - /// Returns the appropriate [`ReconciliationStrategy`] based on pending vs canonical state. - /// - /// Priority: `NoPendingState` → `CatchUp` → `HandleReorg` → `DepthLimitExceeded` → `Continue` - pub const fn reconcile( - pending_earliest_block: Option, - pending_latest_block: Option, - canonical_block_number: u64, - max_depth: u64, - reorg_detected: bool, - ) -> ReconciliationStrategy { - // Check if pending state exists - let latest = match (pending_earliest_block, pending_latest_block) { - (Some(_e), Some(l)) => l, - _ => return ReconciliationStrategy::NoPendingState, - }; - - // Check if canonical has caught up or passed pending - if latest <= canonical_block_number { - return ReconciliationStrategy::CatchUp; - } - - // Check for reorg - if reorg_detected { - return ReconciliationStrategy::HandleReorg; - } - - // Check depth limit: how many pending blocks are ahead of canonical tip. - let depth = latest.saturating_sub(canonical_block_number); - if depth > max_depth { - return ReconciliationStrategy::DepthLimitExceeded { depth, max_depth }; - } - - // No issues, continue building - ReconciliationStrategy::Continue - } -} - -#[cfg(test)] -mod tests { - use super::*; - - // ==================== FlashblockSequenceValidator Tests ==================== - - mod sequence_validator { - use super::*; - - #[test] - fn test_next_in_sequence() { - // Consecutive indices within the same block - assert_eq!( - FlashblockSequenceValidator::validate(100, 2, 100, 3), - SequenceValidationResult::NextInSequence - ); - assert_eq!( - FlashblockSequenceValidator::validate(100, 0, 100, 1), - SequenceValidationResult::NextInSequence - ); - assert_eq!( - FlashblockSequenceValidator::validate(100, 999, 100, 1000), - SequenceValidationResult::NextInSequence - ); - assert_eq!( - FlashblockSequenceValidator::validate(0, 0, 0, 1), - SequenceValidationResult::NextInSequence - ); - } - - #[test] - fn test_first_of_next_block() { - // Index 0 of the next block - assert_eq!( - FlashblockSequenceValidator::validate(0, 0, 1, 0), - SequenceValidationResult::FirstOfNextBlock - ); - assert_eq!( - FlashblockSequenceValidator::validate(100, 5, 101, 0), - SequenceValidationResult::FirstOfNextBlock - ); - assert_eq!( - FlashblockSequenceValidator::validate(100, 0, 101, 0), - SequenceValidationResult::FirstOfNextBlock - ); - assert_eq!( - FlashblockSequenceValidator::validate(999999, 10, 1000000, 0), - SequenceValidationResult::FirstOfNextBlock - ); - } - - #[test] - fn test_duplicate() { - // Same block and index - assert_eq!( - FlashblockSequenceValidator::validate(100, 5, 100, 5), - SequenceValidationResult::Duplicate - ); - assert_eq!( - FlashblockSequenceValidator::validate(100, 0, 100, 0), - SequenceValidationResult::Duplicate - ); - } - - #[test] - fn test_non_sequential_gap() { - // Non-consecutive indices within the same block - assert_eq!( - FlashblockSequenceValidator::validate(100, 2, 100, 4), - SequenceValidationResult::NonSequentialGap { expected: 3, actual: 4 } - ); - assert_eq!( - FlashblockSequenceValidator::validate(100, 0, 100, 10), - SequenceValidationResult::NonSequentialGap { expected: 1, actual: 10 } - ); - // Going backwards - assert_eq!( - FlashblockSequenceValidator::validate(100, 5, 100, 3), - SequenceValidationResult::NonSequentialGap { expected: 6, actual: 3 } - ); - } - - #[test] - fn test_invalid_new_block_index() { - // New block with non-zero index - assert_eq!( - FlashblockSequenceValidator::validate(100, 5, 101, 1), - SequenceValidationResult::InvalidNewBlockIndex { block_number: 101, index: 1 } - ); - // Block gap - assert_eq!( - FlashblockSequenceValidator::validate(100, 5, 105, 3), - SequenceValidationResult::InvalidNewBlockIndex { block_number: 105, index: 3 } - ); - assert_eq!( - FlashblockSequenceValidator::validate(100, 5, 102, 0), - SequenceValidationResult::InvalidNewBlockIndex { block_number: 102, index: 0 } - ); - // Going backwards in block number - assert_eq!( - FlashblockSequenceValidator::validate(100, 5, 99, 0), - SequenceValidationResult::InvalidNewBlockIndex { block_number: 99, index: 0 } - ); - } - } - - // ==================== ReorgDetector Tests ==================== - - mod reorg_detector { - use super::*; - - fn tracked( - block_hash: B256, - parent_hash: B256, - tx_hashes: Vec, - ) -> TrackedBlockFingerprint { - TrackedBlockFingerprint { block_number: 100, block_hash, parent_hash, tx_hashes } - } - - fn canonical( - block_hash: B256, - parent_hash: B256, - tx_hashes: Vec, - ) -> CanonicalBlockFingerprint { - CanonicalBlockFingerprint { block_number: 100, block_hash, parent_hash, tx_hashes } - } - - #[test] - fn test_no_reorg_identical_fingerprint() { - let hashes = vec![B256::repeat_byte(0x01), B256::repeat_byte(0x02)]; - let tracked = tracked(B256::repeat_byte(0xAA), B256::repeat_byte(0x11), hashes.clone()); - let canonical = canonical(B256::repeat_byte(0xAA), B256::repeat_byte(0x11), hashes); - assert_eq!(ReorgDetector::detect(&tracked, &canonical), ReorgDetectionResult::NoReorg); - } - - #[test] - fn test_reorg_on_parent_hash_mismatch_with_identical_txs() { - let hashes = vec![B256::repeat_byte(0x01), B256::repeat_byte(0x02)]; - let tracked = tracked(B256::repeat_byte(0xAA), B256::repeat_byte(0x11), hashes.clone()); - let canonical = canonical(B256::repeat_byte(0xAA), B256::repeat_byte(0x22), hashes); - - assert_eq!( - ReorgDetector::detect(&tracked, &canonical), - ReorgDetectionResult::ReorgDetected - ); - } - - #[test] - fn test_reorg_on_block_hash_mismatch_with_identical_txs() { - let hashes = vec![B256::repeat_byte(0x01), B256::repeat_byte(0x02)]; - let tracked = tracked(B256::repeat_byte(0xAA), B256::repeat_byte(0x11), hashes.clone()); - let canonical = canonical(B256::repeat_byte(0xBB), B256::repeat_byte(0x11), hashes); - - assert_eq!( - ReorgDetector::detect(&tracked, &canonical), - ReorgDetectionResult::ReorgDetected - ); - } - - #[test] - fn test_reorg_on_tx_hash_mismatch() { - let tracked = tracked( - B256::repeat_byte(0xAA), - B256::repeat_byte(0x11), - vec![B256::repeat_byte(0x01), B256::repeat_byte(0x02)], - ); - let canonical = canonical( - B256::repeat_byte(0xAA), - B256::repeat_byte(0x11), - vec![B256::repeat_byte(0x01), B256::repeat_byte(0x03)], - ); - - assert_eq!( - ReorgDetector::detect(&tracked, &canonical), - ReorgDetectionResult::ReorgDetected - ); - } - - #[test] - fn test_result_helpers() { - let no_reorg = ReorgDetectionResult::NoReorg; - assert!(no_reorg.is_no_reorg()); - assert!(!no_reorg.is_reorg()); - - let reorg = ReorgDetectionResult::ReorgDetected; - assert!(reorg.is_reorg()); - assert!(!reorg.is_no_reorg()); - } - } - - // ==================== CanonicalBlockReconciler Tests ==================== - - mod reconciler { - use super::*; - - #[test] - fn test_no_pending_state() { - assert_eq!( - CanonicalBlockReconciler::reconcile(None, None, 100, 10, false), - ReconciliationStrategy::NoPendingState - ); - assert_eq!( - CanonicalBlockReconciler::reconcile(Some(100), None, 100, 10, false), - ReconciliationStrategy::NoPendingState - ); - assert_eq!( - CanonicalBlockReconciler::reconcile(None, Some(100), 100, 10, false), - ReconciliationStrategy::NoPendingState - ); - } - - #[test] - fn test_catchup() { - // Canonical equals latest pending - assert_eq!( - CanonicalBlockReconciler::reconcile(Some(100), Some(105), 105, 10, false), - ReconciliationStrategy::CatchUp - ); - // Canonical passed latest pending - assert_eq!( - CanonicalBlockReconciler::reconcile(Some(100), Some(105), 110, 10, false), - ReconciliationStrategy::CatchUp - ); - // Single pending block - assert_eq!( - CanonicalBlockReconciler::reconcile(Some(100), Some(100), 100, 10, false), - ReconciliationStrategy::CatchUp - ); - // CatchUp takes priority over reorg - assert_eq!( - CanonicalBlockReconciler::reconcile(Some(100), Some(105), 105, 10, true), - ReconciliationStrategy::CatchUp - ); - } - - #[test] - fn test_handle_reorg() { - assert_eq!( - CanonicalBlockReconciler::reconcile(Some(100), Some(110), 102, 10, true), - ReconciliationStrategy::HandleReorg - ); - // Reorg takes priority over depth limit - assert_eq!( - CanonicalBlockReconciler::reconcile(Some(100), Some(130), 120, 10, true), - ReconciliationStrategy::HandleReorg - ); - } - - #[test] - fn test_depth_limit_exceeded() { - assert_eq!( - CanonicalBlockReconciler::reconcile(Some(100), Some(120), 115, 10, false), - ReconciliationStrategy::Continue - ); - assert_eq!( - CanonicalBlockReconciler::reconcile(Some(100), Some(105), 101, 0, false), - ReconciliationStrategy::DepthLimitExceeded { depth: 4, max_depth: 0 } - ); - assert_eq!( - CanonicalBlockReconciler::reconcile(Some(100), Some(200), 130, 64, false), - ReconciliationStrategy::DepthLimitExceeded { depth: 70, max_depth: 64 } - ); - } - - #[test] - fn test_continue() { - // Normal case: pending ahead of canonical - assert_eq!( - CanonicalBlockReconciler::reconcile(Some(100), Some(110), 105, 10, false), - ReconciliationStrategy::Continue - ); - // Exactly at depth limit - assert_eq!( - CanonicalBlockReconciler::reconcile(Some(100), Some(120), 110, 10, false), - ReconciliationStrategy::Continue - ); - // Canonical at earliest pending - assert_eq!( - CanonicalBlockReconciler::reconcile(Some(100), Some(105), 100, 10, false), - ReconciliationStrategy::Continue - ); - // Zero depth is OK with max_depth=0 - assert_eq!( - CanonicalBlockReconciler::reconcile(Some(100), Some(105), 100, 0, false), - ReconciliationStrategy::DepthLimitExceeded { depth: 5, max_depth: 0 } - ); - } - } -} diff --git a/crates/rpc/Cargo.toml b/crates/rpc/Cargo.toml index a7610790..febf835f 100644 --- a/crates/rpc/Cargo.toml +++ b/crates/rpc/Cargo.toml @@ -34,6 +34,7 @@ alloy-serde.workspace = true # op op-alloy-network.workspace = true +op-alloy-rpc-types.workspace = true # rpc jsonrpsee.workspace = true From 7e4c5588f7a42e2fb292a3a68f1c718e83353570 Mon Sep 17 00:00:00 2001 From: Niven Date: Fri, 20 Mar 2026 10:38:00 +0800 Subject: [PATCH 43/76] fix(flashblocks): resolve merging receipt index on incremental builds, better refactors MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude Sonnet 4.6 --- crates/flashblocks/src/execution/validator.rs | 43 ++++++++++++------- 1 file changed, 28 insertions(+), 15 deletions(-) diff --git a/crates/flashblocks/src/execution/validator.rs b/crates/flashblocks/src/execution/validator.rs index ce09fbd4..c397035f 100644 --- a/crates/flashblocks/src/execution/validator.rs +++ b/crates/flashblocks/src/execution/validator.rs @@ -203,10 +203,10 @@ where .with_block_hash(Some(anchor_hash)) .with_lazy_overlay(lazy_overlay); - // Spawn the appropriate processor based on strategy + // Spawn the appropriate processor based on strategy. let mut handle = self.spawn_payload_processor( execution_env.clone(), - block_transactions.clone(), + transactions.clone(), provider_builder, overlay_factory.clone(), strategy, @@ -229,7 +229,7 @@ where &parent_header, attrs, transactions, - pending_sequence, + pending_sequence.as_ref(), &mut handle, )?; @@ -488,7 +488,7 @@ where parent_header: &SealedHeaderFor, attrs: EvmConfig::NextBlockEnvCtx, transactions: Vec>>, - pending_sequence: Option>, + pending_sequence: Option<&PendingSequence>, handle: &mut PayloadHandle, ) -> eyre::Result<( BlockExecutionOutput, @@ -504,12 +504,11 @@ where { // Build state let mut read_cache = pending_sequence - .as_ref() .map(|p| p.prefix_execution_meta.cached_reads.clone()) .unwrap_or_default(); let cached_db = read_cache.as_db_mut(StateProviderDatabase::new(state_provider)); let mut state_builder = State::builder().with_database(cached_db).with_bundle_update(); - if let Some(seq) = pending_sequence.as_ref() { + if let Some(seq) = pending_sequence { state_builder = state_builder .with_bundle_prestate(seq.pending.executed_block.execution_output.state.clone()); } @@ -533,7 +532,8 @@ where // Spawn background task to compute receipt root and logs bloom incrementally. // Unbounded channel is used since tx count bounds capacity anyway (max ~30k txs per block). - let receipts_len = transactions.len(); + let prefix_receipt_count = pending_sequence.map_or(0, |s| s.pending.receipts.len()); + let receipts_len = prefix_receipt_count + transactions.len(); let (receipt_tx, receipt_rx) = crossbeam_channel::unbounded(); let (result_tx, result_rx) = tokio::sync::oneshot::channel(); let task_handle = ReceiptRootTaskHandle::new(receipt_rx, result_tx); @@ -548,9 +548,9 @@ where } // Execute all transactions and finalize - let (executor, senders) = self.execute_transactions( + let (executor, suffix_senders) = self.execute_transactions( executor, - pending_sequence.as_ref(), + pending_sequence, transaction_count, handle, &receipt_tx, @@ -559,13 +559,22 @@ where // Finish execution and get the result let (_evm, mut result) = executor.finish().map(|(evm, result)| (evm.into_db(), result))?; - if let Some(seq) = pending_sequence.as_ref() { + if let Some(seq) = pending_sequence { result = Self::merge_suffix_results( &seq.prefix_execution_meta, seq.pending.receipts.as_ref().clone(), result, ); } + // Reconstruct full senders list + let senders = if let Some(seq) = pending_sequence { + let mut all_senders = seq.pending.executed_block.recovered_block.senders().to_vec(); + all_senders.extend(suffix_senders); + all_senders + } else { + suffix_senders + }; + // Merge transitions into bundle state db.merge_transitions(BundleRetention::Reverts); @@ -599,12 +608,16 @@ where EvmConfig: ConfigureEvm + Unpin> + 'static, { - // Send all previously executed receipts to the receipt root task for incremental builds - if let Some(seq) = pending_sequence { + // Send all previously executed receipts to the receipt root task for incremental builds. + let receipt_index_offset = if let Some(seq) = pending_sequence { + let prefix_count = seq.pending.receipts.len(); for (index, receipt) in seq.pending.receipts.iter().enumerate() { let _ = receipt_tx.send(IndexedReceipt::new(index, receipt.clone())); } - } + prefix_count + } else { + 0 + }; let mut senders = Vec::with_capacity(transaction_count); let mut transactions = handle.iter_transactions().into_iter(); @@ -629,7 +642,7 @@ where last_sent_len = current_len; // Send the latest receipt to the background task for incremental root computation. if let Some(receipt) = executor.receipts().last() { - let tx_index = current_len - 1; + let tx_index = receipt_index_offset + current_len - 1; let _ = receipt_tx.send(IndexedReceipt::new(tx_index, receipt.clone())); } } @@ -901,7 +914,7 @@ where .flashblocks_state .get_overlay_data(&parent_hash) .map(|(blocks, _, anchor_hash)| (blocks, anchor_hash)) - .unwrap_or_else(|| (vec![], B256::ZERO)); + .unwrap_or_else(|| (vec![], parent_hash)); if blocks.is_empty() { debug!(target: "flashblocks::validator", "Parent found on disk, no lazy overlay needed"); From 7da6e81a8795ffded018cebc92bf851294481d2a Mon Sep 17 00:00:00 2001 From: Niven Date: Fri, 20 Mar 2026 11:37:47 +0800 Subject: [PATCH 44/76] fix(flashblocks): resolve passing overlay data to generating lazyoverlays MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude Sonnet 4.6 --- crates/flashblocks/src/execution/validator.rs | 31 +++++++++---------- 1 file changed, 15 insertions(+), 16 deletions(-) diff --git a/crates/flashblocks/src/execution/validator.rs b/crates/flashblocks/src/execution/validator.rs index c397035f..eb667d27 100644 --- a/crates/flashblocks/src/execution/validator.rs +++ b/crates/flashblocks/src/execution/validator.rs @@ -192,7 +192,8 @@ where // Create lazy overlay from ancestors - this doesn't block, allowing execution to start // before the trie data is ready. The overlay will be computed on first access. - let (lazy_overlay, anchor_hash) = self.get_parent_lazy_overlay(parent_hash); + let (lazy_overlay, anchor_hash) = + Self::get_parent_lazy_overlay(overlay_data.as_ref(), parent_hash); // Create overlay factory for payload processor (StateRootTask path needs it for // multiproofs) @@ -445,8 +446,7 @@ where "height mismatch: incoming={incoming_block_number}, pending={pending_height}" )); } - let incremental = pending_height == incoming_block_number; - if incremental { + if pending_height == incoming_block_number { // Validate states of last executed flashblock index let last_index = pending.prefix_execution_meta.last_flashblock_index; if last_index.saturating_add(1) != incoming_index { @@ -470,7 +470,7 @@ where "height mismatch: incoming={incoming_block_number}, canonical={canon_height}" )); } - return Ok(None); + Ok(None) } /// Executes a block with the given state provider. @@ -562,7 +562,7 @@ where if let Some(seq) = pending_sequence { result = Self::merge_suffix_results( &seq.prefix_execution_meta, - seq.pending.receipts.as_ref().clone(), + (*seq.pending.receipts).clone(), result, ); } @@ -620,7 +620,7 @@ where }; let mut senders = Vec::with_capacity(transaction_count); - let mut transactions = handle.iter_transactions().into_iter(); + let mut transactions = handle.iter_transactions(); // Some executors may execute transactions that do not append receipts during the // main loop (e.g., system transactions whose receipts are added during finalization). @@ -905,16 +905,15 @@ where /// block hash (the highest persisted ancestor). This allows execution to start immediately /// while the trie input computation is deferred until the overlay is actually needed. /// - /// If parent is on disk (no in-memory blocks), returns `None` for the lazy overlay. - /// - /// Uses a cached overlay if available for the canonical head (the common case). - fn get_parent_lazy_overlay(&self, parent_hash: B256) -> (Option, B256) { - // Get blocks leading to the parent to determine the anchor - let (blocks, anchor_hash) = self - .flashblocks_state - .get_overlay_data(&parent_hash) - .map(|(blocks, _, anchor_hash)| (blocks, anchor_hash)) - .unwrap_or_else(|| (vec![], parent_hash)); + /// If parent is on disk (no in-memory blocks), returns `(None, parent_hash)`. + fn get_parent_lazy_overlay( + overlay_data: Option<&(Vec>, B256)>, + parent_hash: B256, + ) -> (Option, B256) { + let Some((blocks, anchor)) = overlay_data else { + return (None, parent_hash); + }; + let anchor_hash = *anchor; if blocks.is_empty() { debug!(target: "flashblocks::validator", "Parent found on disk, no lazy overlay needed"); From 73800110386e7fe251c43202584265213e79ad08 Mon Sep 17 00:00:00 2001 From: Niven Date: Tue, 24 Mar 2026 14:30:41 +0800 Subject: [PATCH 45/76] feat(flashblocks): thread payload events sender to RPC service for engine tree pre-warming MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Cherry-pick payload events handle from PayloadBuilderService through OnceLock so the FlashblocksRpcService can forward locally-built payloads to the engine state tree. 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude Opus 4.6 (1M context) --- Cargo.lock | 1 + bin/node/src/main.rs | 8 +++++++- bin/node/src/payload.rs | 8 +++++++- crates/builder/Cargo.toml | 1 + crates/builder/src/flashblocks/mod.rs | 2 +- crates/builder/src/flashblocks/service.rs | 12 +++++++++++- crates/builder/src/tests/framework/instance.rs | 3 ++- crates/flashblocks/src/service.rs | 6 +++++- 8 files changed, 35 insertions(+), 6 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 4884b34f..61d130b1 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -14099,6 +14099,7 @@ dependencies = [ "reth-optimism-storage", "reth-optimism-txpool", "reth-payload-builder", + "reth-payload-builder-primitives", "reth-payload-primitives", "reth-payload-util", "reth-primitives", diff --git a/bin/node/src/main.rs b/bin/node/src/main.rs index 984ccf84..89cd0705 100644 --- a/bin/node/src/main.rs +++ b/bin/node/src/main.rs @@ -8,7 +8,7 @@ use payload::XLayerPayloadServiceBuilder; use args::XLayerArgs; use clap::Parser; use either::Either; -use std::sync::Arc; +use std::sync::{Arc, OnceLock}; use tracing::info; use reth::rpc::eth::EthApiTypes; @@ -122,12 +122,17 @@ fn main() { ); } + // Get the payload events tx for pre-warming the engine tree with locally built + // pending flashblocks sequence. + let events_sender = Arc::new(OnceLock::new()); + // Create the X Layer payload service builder // It handles both flashblocks and default modes internally let payload_builder = XLayerPayloadServiceBuilder::new( args.xlayer_args.builder.clone(), args.xlayer_args.flashblocks_rpc.flashblock_url.is_some(), args.rollup_args.compute_pending_block, + events_sender.clone(), )? .with_bridge_config(bridge_config); @@ -149,6 +154,7 @@ fn main() { stream, args.xlayer_args.builder.flashblocks, args.rollup_args.flashblocks_url.is_some(), + events_sender.get().cloned(), datadir, )?; service.spawn(); diff --git a/bin/node/src/payload.rs b/bin/node/src/payload.rs index 52937096..b117fa33 100644 --- a/bin/node/src/payload.rs +++ b/bin/node/src/payload.rs @@ -1,3 +1,5 @@ +use std::sync::{Arc, OnceLock}; + use reth::builder::components::PayloadServiceBuilder; use reth_node_api::NodeTypes; use reth_node_builder::{components::BasicPayloadServiceBuilder, BuilderContext}; @@ -7,7 +9,7 @@ use reth_optimism_payload_builder::config::{OpDAConfig, OpGasLimitConfig}; use xlayer_bridge_intercept::BridgeInterceptConfig; use xlayer_builder::{ args::BuilderArgs, - flashblocks::{BuilderConfig, FlashblocksServiceBuilder}, + flashblocks::{BuilderConfig, FlashblocksServiceBuilder, PayloadEventsSender}, traits::{NodeBounds, PoolBounds}, }; @@ -30,6 +32,7 @@ impl XLayerPayloadServiceBuilder { xlayer_builder_args: BuilderArgs, flashblock_rpc: bool, compute_pending_block: bool, + events_sender: Arc>, ) -> eyre::Result { Self::with_config( xlayer_builder_args, @@ -37,6 +40,7 @@ impl XLayerPayloadServiceBuilder { compute_pending_block, OpDAConfig::default(), OpGasLimitConfig::default(), + events_sender, ) } @@ -46,12 +50,14 @@ impl XLayerPayloadServiceBuilder { compute_pending_block: bool, da_config: OpDAConfig, gas_limit_config: OpGasLimitConfig, + events_sender: Arc>, ) -> eyre::Result { let builder = if xlayer_builder_args.flashblocks.enabled || flashblock_rpc { let builder_config = BuilderConfig::try_from(xlayer_builder_args)?; XLayerPayloadServiceBuilderInner::Flashblocks(Box::new(FlashblocksServiceBuilder { config: builder_config, bridge_intercept: Default::default(), + events_sender, })) } else { let payload_builder = OpPayloadBuilder::new(compute_pending_block) diff --git a/crates/builder/Cargo.toml b/crates/builder/Cargo.toml index 472a3537..f6907db2 100644 --- a/crates/builder/Cargo.toml +++ b/crates/builder/Cargo.toml @@ -35,6 +35,7 @@ reth-rpc-engine-api.workspace = true reth-node-core.workspace = true reth-basic-payload-builder.workspace = true reth-payload-builder.workspace = true +reth-payload-builder-primitives.workspace = true reth-execution-types.workspace = true reth-metrics.workspace = true reth-provider.workspace = true diff --git a/crates/builder/src/flashblocks/mod.rs b/crates/builder/src/flashblocks/mod.rs index 17148b3c..7aa6c267 100644 --- a/crates/builder/src/flashblocks/mod.rs +++ b/crates/builder/src/flashblocks/mod.rs @@ -20,7 +20,7 @@ mod timing; pub(crate) mod utils; pub use context::FlashblocksBuilderCtx; -pub use service::FlashblocksServiceBuilder; +pub use service::{FlashblocksServiceBuilder, PayloadEventsSender}; pub use utils::{cache::FlashblockPayloadsCache, wspub::WebSocketPublisher}; /// Configuration values that are specific to the flashblocks builder. diff --git a/crates/builder/src/flashblocks/service.rs b/crates/builder/src/flashblocks/service.rs index 854211bb..d671655d 100644 --- a/crates/builder/src/flashblocks/service.rs +++ b/crates/builder/src/flashblocks/service.rs @@ -16,18 +16,26 @@ use crate::{ traits::{NodeBounds, PoolBounds}, }; use eyre::WrapErr as _; -use std::{sync::Arc, time::Duration}; +use std::{ + sync::{Arc, OnceLock}, + time::Duration, +}; use reth_basic_payload_builder::BasicPayloadJobGeneratorConfig; use reth_node_api::NodeTypes; use reth_node_builder::{components::PayloadServiceBuilder, BuilderContext}; use reth_optimism_evm::OpEvmConfig; +use reth_optimism_node::OpEngineTypes; use reth_payload_builder::{PayloadBuilderHandle, PayloadBuilderService}; +use reth_payload_builder_primitives::Events; use reth_provider::CanonStateSubscriptions; +pub type PayloadEventsSender = tokio::sync::broadcast::Sender>; + pub struct FlashblocksServiceBuilder { pub config: BuilderConfig, pub bridge_intercept: xlayer_bridge_intercept::BridgeInterceptConfig, + pub events_sender: Arc>, } impl FlashblocksServiceBuilder { @@ -159,6 +167,8 @@ impl FlashblocksServiceBuilder { let (payload_service, payload_builder_handle) = PayloadBuilderService::new(payload_generator, ctx.provider().canonical_state_stream()); + let _ = self.events_sender.set(payload_service.payload_events_handle()); + let handler_ctx = FlashblockHandlerContext::new( &ctx.provider().clone(), self.config.clone(), diff --git a/crates/builder/src/tests/framework/instance.rs b/crates/builder/src/tests/framework/instance.rs index 6fddcf92..68244de0 100644 --- a/crates/builder/src/tests/framework/instance.rs +++ b/crates/builder/src/tests/framework/instance.rs @@ -41,7 +41,7 @@ use reth_optimism_rpc::OpEthApiBuilder; use reth_optimism_txpool::OpPooledTransaction; use reth_transaction_pool::{AllTransactionsEvents, TransactionPool}; use std::{ - sync::{Arc, LazyLock}, + sync::{Arc, LazyLock, OnceLock}, time::Instant, }; use tokio::{sync::oneshot, task::JoinHandle}; @@ -117,6 +117,7 @@ impl LocalInstance { FlashblocksServiceBuilder { config: builder_config, bridge_intercept: Default::default(), + events_sender: Arc::new(OnceLock::new()), }, )) .with_add_ons(addons) diff --git a/crates/flashblocks/src/service.rs b/crates/flashblocks/src/service.rs index 496ba274..975b0846 100644 --- a/crates/flashblocks/src/service.rs +++ b/crates/flashblocks/src/service.rs @@ -13,7 +13,7 @@ use reth_tasks::TaskExecutor; use xlayer_builder::{ args::FlashblocksArgs, - flashblocks::WebSocketPublisher, + flashblocks::{PayloadEventsSender, WebSocketPublisher}, metrics::{tokio::FlashblocksTaskMetrics, BuilderMetrics}, }; @@ -30,6 +30,8 @@ pub struct FlashblocksRpcService { ws_pub: Arc, /// Whether to relay flashblocks to the subscribers. relay_flashblocks: bool, + /// Payload events sender for forwarding locally-built payloads to the engine state tree. + events_sender: Option, /// Data directory for flashblocks persistence. datadir: ChainPath, } @@ -43,6 +45,7 @@ where incoming_flashblock_rx: S, args: FlashblocksArgs, relay_flashblocks: bool, + events_sender: Option, datadir: ChainPath, ) -> Result { let (received_flashblocks_tx, _) = tokio::sync::broadcast::channel(128); @@ -68,6 +71,7 @@ where task_executor, ws_pub, relay_flashblocks, + events_sender, datadir, }) } From 9e1b537f6362e5f04726de74c82cb0cafb34c177 Mon Sep 17 00:00:00 2001 From: Niven Date: Tue, 24 Mar 2026 14:32:17 +0800 Subject: [PATCH 46/76] Fmt --- crates/flashblocks/src/cache/confirm.rs | 2 +- crates/flashblocks/src/cache/mod.rs | 3 +-- crates/flashblocks/src/subscription/mod.rs | 2 +- crates/rpc/src/flashblocks.rs | 4 +--- crates/rpc/src/helper.rs | 8 ++++---- 5 files changed, 8 insertions(+), 11 deletions(-) diff --git a/crates/flashblocks/src/cache/confirm.rs b/crates/flashblocks/src/cache/confirm.rs index f99a1f83..aed427eb 100644 --- a/crates/flashblocks/src/cache/confirm.rs +++ b/crates/flashblocks/src/cache/confirm.rs @@ -214,7 +214,7 @@ impl ConfirmCache { /// Removes all tx index entries for the transactions in the given block. fn remove_tx_index_for_block(&mut self, block: &ConfirmedBlock) { for tx in block.executed_block.recovered_block.body().transactions() { - self.tx_index.remove(&*tx.tx_hash()); + self.tx_index.remove(tx.tx_hash()); } } diff --git a/crates/flashblocks/src/cache/mod.rs b/crates/flashblocks/src/cache/mod.rs index c033ad9f..5b9a7542 100644 --- a/crates/flashblocks/src/cache/mod.rs +++ b/crates/flashblocks/src/cache/mod.rs @@ -4,7 +4,6 @@ pub(crate) mod raw; pub(crate) mod utils; pub(crate) use confirm::ConfirmCache; -pub(crate) use raw::RawFlashblocksCache; pub use pending::PendingSequence; @@ -218,7 +217,7 @@ impl FlashblockStateCache { block_hash: &B256, ) -> Option<(Vec>, SealedHeaderFor, B256)> { let guard = self.inner.read(); - let block = guard.get_block_by_hash(&block_hash)?.block; + let block = guard.get_block_by_hash(block_hash)?.block; let block_num = block.number(); let canon_hash = guard.get_canon_info().1; let in_memory = guard.get_executed_blocks_up_to_height(block_num); diff --git a/crates/flashblocks/src/subscription/mod.rs b/crates/flashblocks/src/subscription/mod.rs index d9fc5e85..b54b4a31 100644 --- a/crates/flashblocks/src/subscription/mod.rs +++ b/crates/flashblocks/src/subscription/mod.rs @@ -1,4 +1,4 @@ pub mod pubsub; mod rpc; -pub use rpc::{FlashblocksPubSub, FlashblocksPubSubApiServer}; +pub use rpc::FlashblocksPubSub; diff --git a/crates/rpc/src/flashblocks.rs b/crates/rpc/src/flashblocks.rs index 8bc1c90a..587cf97f 100644 --- a/crates/rpc/src/flashblocks.rs +++ b/crates/rpc/src/flashblocks.rs @@ -431,8 +431,7 @@ where let chain = notification.committed(); if let Some((block, indexed_tx, receipt, all_receipts)) = chain.find_transaction_and_receipt_by_hash(hash) - { - if let Some(receipt) = convert_transaction_receipt( + && let Some(receipt) = convert_transaction_receipt( block, all_receipts, indexed_tx, @@ -444,7 +443,6 @@ where { return Ok(receipt); } - } } else { // Canonical stream ended break; diff --git a/crates/rpc/src/helper.rs b/crates/rpc/src/helper.rs index a500ddcf..bc7c9d41 100644 --- a/crates/rpc/src/helper.rs +++ b/crates/rpc/src/helper.rs @@ -52,11 +52,11 @@ pub(crate) fn to_rpc_block( where Rpc: RpcConvert, { - Ok(bar.block.clone_into_rpc_block( + bar.block.clone_into_rpc_block( full.into(), |tx, tx_info| converter.fill(tx, tx_info), |header, size| converter.convert_header(header, size), - )?) + ) } /// Converts all receipts from a `BlockAndReceipts` into RPC receipts. @@ -98,7 +98,7 @@ where }) .collect::>(); - Ok(converter.convert_receipts_with_block(inputs, bar.sealed_block())?) + converter.convert_receipts_with_block(inputs, bar.sealed_block()) } /// Converts a `CachedTxInfo` and `BlockAndReceipts` into an RPC transaction. @@ -111,7 +111,7 @@ where Rpc: RpcConvert, { let tx_info = build_tx_info(bar, info.tx.tx_hash(), info.tx_index); - Ok(converter.fill(info.tx.clone().try_into_recovered().expect("valid cached tx"), tx_info)?) + converter.fill(info.tx.clone().try_into_recovered().expect("valid cached tx"), tx_info) } /// Converts a `BlockAndReceipts` and transaction index into an RPC transaction. From 35f15098f2d561cf02c4f9b1baf7ca43702c4502 Mon Sep 17 00:00:00 2001 From: Niven Date: Wed, 25 Mar 2026 09:05:57 +0800 Subject: [PATCH 47/76] feat: Add flashblocks rpc handles MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude Opus 4.6 (1M context) --- Cargo.lock | 10 + bin/node/Cargo.toml | 7 + bin/node/src/main.rs | 32 +- crates/flashblocks/Cargo.toml | 19 +- crates/flashblocks/src/cache/mod.rs | 1 + crates/flashblocks/src/cache/raw.rs | 101 ++++-- crates/flashblocks/src/execution/mod.rs | 9 +- crates/flashblocks/src/execution/validator.rs | 16 +- crates/flashblocks/src/lib.rs | 8 +- .../flashblocks/src/{handle.rs => persist.rs} | 0 crates/flashblocks/src/service.rs | 304 ++++++++++++------ crates/flashblocks/src/state.rs | 222 +++++++++++++ 12 files changed, 562 insertions(+), 167 deletions(-) rename crates/flashblocks/src/{handle.rs => persist.rs} (100%) create mode 100644 crates/flashblocks/src/state.rs diff --git a/Cargo.lock b/Cargo.lock index 61d130b1..3b9785e8 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -14195,6 +14195,7 @@ dependencies = [ "brotli", "crossbeam-channel", "derive_more", + "either", "eyre", "futures", "futures-util", @@ -14221,7 +14222,11 @@ dependencies = [ "reth-optimism-evm", "reth-optimism-flashblocks", "reth-optimism-forks", + "reth-optimism-payload-builder", "reth-optimism-primitives", + "reth-payload-builder", + "reth-payload-builder-primitives", + "reth-payload-primitives", "reth-primitives-traits", "reth-provider", "reth-revm", @@ -14304,6 +14309,7 @@ dependencies = [ "op-alloy-network", "reth", "reth-basic-payload-builder", + "reth-chain-state", "reth-chainspec", "reth-cli-util", "reth-evm", @@ -14315,17 +14321,21 @@ dependencies = [ "reth-optimism-forks", "reth-optimism-node", "reth-optimism-payload-builder", + "reth-optimism-primitives", "reth-optimism-txpool", "reth-payload-builder", "reth-payload-builder-primitives", "reth-payload-primitives", "reth-payload-util", "reth-primitives-traits", + "reth-provider", "reth-revm", "reth-rpc-server-types", "reth-storage-api", + "reth-tasks", "reth-transaction-pool", "revm", + "tokio", "tracing", "url", "xlayer-bridge-intercept", diff --git a/bin/node/Cargo.toml b/bin/node/Cargo.toml index 96912dc7..b7190256 100644 --- a/bin/node/Cargo.toml +++ b/bin/node/Cargo.toml @@ -25,7 +25,9 @@ xlayer-trace-monitor.workspace = true # reth reth.workspace = true +reth-chain-state.workspace = true reth-optimism-node.workspace = true +reth-optimism-primitives.workspace = true reth-optimism-cli.workspace = true reth-node-api.workspace = true reth-node-builder.workspace = true @@ -46,7 +48,9 @@ reth-optimism-forks.workspace = true reth-optimism-txpool.workspace = true reth-transaction-pool.workspace = true reth-cli-util.workspace = true +reth-provider.workspace = true reth-rpc-server-types.workspace = true +reth-tasks = { workspace = true, features = ["rayon"] } # alloy alloy-primitives.workspace = true @@ -57,6 +61,9 @@ op-alloy-network.workspace = true # revm revm.workspace = true +# async +tokio.workspace = true + # misc clap.workspace = true tracing.workspace = true diff --git a/bin/node/src/main.rs b/bin/node/src/main.rs index 89cd0705..d9296d1b 100644 --- a/bin/node/src/main.rs +++ b/bin/node/src/main.rs @@ -16,13 +16,18 @@ use reth::{ builder::{DebugNodeLauncher, EngineNodeLauncher, Node, NodeHandle, TreeConfig}, providers::providers::BlockchainProvider, }; +use reth_chainspec::ChainSpecProvider; use reth_optimism_cli::Cli; +use reth_optimism_evm::OpEvmConfig; use reth_optimism_node::{args::RollupArgs, OpNode}; +use reth_provider::CanonStateSubscriptions; use reth_rpc_server_types::RethRpcModule; +use reth_tasks::Runtime; use xlayer_chainspec::XLayerChainSpecParser; use xlayer_flashblocks::{ - FlashblockStateCache, FlashblocksPubSub, FlashblocksRpcService, WsFlashBlockStream, + FlashblockStateCache, FlashblocksPersistCtx, FlashblocksPubSub, FlashblocksRpcCtx, + FlashblocksRpcService, WsFlashBlockStream, }; use xlayer_legacy_rpc::{layer::LegacyRpcRouterLayer, LegacyRpcRouterConfig}; use xlayer_monitor::{start_monitor_handle, RpcMonitorLayer, XLayerMonitor}; @@ -125,6 +130,7 @@ fn main() { // Get the payload events tx for pre-warming the engine tree with locally built // pending flashblocks sequence. let events_sender = Arc::new(OnceLock::new()); + let tree_config = builder.config().engine.tree_config(); // Create the X Layer payload service builder // It handles both flashblocks and default modes internally @@ -148,16 +154,26 @@ fn main() { { // Initialize flashblocks RPC let flashblocks_state = FlashblockStateCache::new(); - let stream = WsFlashBlockStream::new(flashblock_url); + let canon_state_rx = ctx.provider().canonical_state_stream(); let service = FlashblocksRpcService::new( - ctx.node().task_executor.clone(), - stream, args.xlayer_args.builder.flashblocks, - args.rollup_args.flashblocks_url.is_some(), - events_sender.get().cloned(), - datadir, + flashblocks_state.clone(), + ctx.node().task_executor.clone(), + FlashblocksRpcCtx { + provider: ctx.provider().clone(), + canon_state_rx, + evm_config: OpEvmConfig::optimism(ctx.provider().chain_spec()), + chain_spec: ctx.provider().chain_spec(), + tree_config, + }, + FlashblocksPersistCtx { + datadir, + relay_flashblocks: args.rollup_args.flashblocks_url.is_some(), + }, )?; - service.spawn(); + service.spawn_prewarm(events_sender); + service.spawn_persistence()?; + service.spawn_rpc(WsFlashBlockStream::new(flashblock_url)); info!(target: "reth::cli", "xlayer flashblocks service initialized"); // Initialize custom flashblocks subscription diff --git a/crates/flashblocks/Cargo.toml b/crates/flashblocks/Cargo.toml index 8dd2e149..5a75a59f 100644 --- a/crates/flashblocks/Cargo.toml +++ b/crates/flashblocks/Cargo.toml @@ -17,6 +17,8 @@ xlayer-builder.workspace = true reth-chain-state = { workspace = true, features = ["serde"] } reth-chainspec.workspace = true reth-db-models.workspace = true +reth-engine-primitives.workspace = true +reth-engine-tree.workspace = true reth-errors.workspace = true reth-evm.workspace = true reth-execution-types = { workspace = true, features = ["serde"] } @@ -24,26 +26,28 @@ reth-metrics.workspace = true reth-node-api.workspace = true reth-node-core.workspace = true reth-optimism-consensus.workspace = true +reth-optimism-flashblocks.workspace = true reth-optimism-forks.workspace = true +reth-optimism-payload-builder.workspace = true reth-optimism-primitives = { workspace = true, features = ["serde"] } +reth-payload-builder.workspace = true +reth-payload-builder-primitives.workspace = true +reth-payload-primitives.workspace = true reth-primitives-traits = { workspace = true, features = ["serde"] } +reth-provider.workspace = true reth-revm.workspace = true reth-rpc.workspace = true reth-rpc-convert.workspace = true reth-rpc-eth-api.workspace = true reth-rpc-eth-types.workspace = true reth-rpc-server-types.workspace = true -reth-optimism-flashblocks.workspace = true reth-storage-api.workspace = true -reth-trie-common.workspace = true +reth-tasks = { workspace = true, features = ["rayon"] } +reth-tracing.workspace = true reth-trie.workspace = true +reth-trie-common.workspace = true reth-trie-db.workspace = true reth-trie-parallel.workspace = true -reth-tasks = { workspace = true, features = ["rayon"] } -reth-tracing.workspace = true -reth-engine-primitives.workspace = true -reth-engine-tree.workspace = true -reth-provider.workspace = true # alloy alloy-consensus.workspace = true @@ -76,6 +80,7 @@ async-trait.workspace = true brotli = { workspace = true, features = ["std"] } crossbeam-channel.workspace = true derive_more.workspace = true +either.workspace = true eyre.workspace = true parking_lot.workspace = true thiserror.workspace = true diff --git a/crates/flashblocks/src/cache/mod.rs b/crates/flashblocks/src/cache/mod.rs index 5b9a7542..19be2a49 100644 --- a/crates/flashblocks/src/cache/mod.rs +++ b/crates/flashblocks/src/cache/mod.rs @@ -4,6 +4,7 @@ pub(crate) mod raw; pub(crate) mod utils; pub(crate) use confirm::ConfirmCache; +pub(crate) use raw::RawFlashblocksCache; pub use pending::PendingSequence; diff --git a/crates/flashblocks/src/cache/raw.rs b/crates/flashblocks/src/cache/raw.rs index 86afa3ea..b18fac16 100644 --- a/crates/flashblocks/src/cache/raw.rs +++ b/crates/flashblocks/src/cache/raw.rs @@ -1,12 +1,12 @@ +use crate::execution::BuildArgs; use parking_lot::RwLock; use ringbuffer::{AllocRingBuffer, RingBuffer}; use std::{collections::BTreeMap, sync::Arc}; -use tracing::*; -use alloy_eips::eip2718::WithEncoded; +use alloy_eips::{eip2718::WithEncoded, eip4895::Withdrawal}; use alloy_primitives::B256; use alloy_rpc_types_engine::PayloadId; -use op_alloy_rpc_types_engine::OpFlashblockPayload; +use op_alloy_rpc_types_engine::{OpFlashblockPayload, OpFlashblockPayloadBase}; use reth_primitives_traits::{transaction::TxHashRef, Recovered, SignedTransaction}; @@ -32,13 +32,20 @@ impl RawFlashblocksCache { Self { inner } } - pub fn handle_canonical_height(&mut self, height: u64) { + pub fn handle_canonical_height(&self, height: u64) { self.inner.write().handle_canonical_height(height); } - pub fn handle_flashblock(&mut self, flashblock: OpFlashblockPayload) -> eyre::Result<()> { + pub fn handle_flashblock(&self, flashblock: OpFlashblockPayload) -> eyre::Result<()> { self.inner.write().handle_flashblock(flashblock) } + + pub(crate) fn take_buildable_for_height( + &self, + height: u64, + ) -> Option>>>> { + self.inner.read().take_buildable_for_height(height) + } } #[derive(Debug, Clone)] @@ -54,26 +61,25 @@ impl RawFlashblocksCacheInner { pub fn handle_canonical_height(&mut self, height: u64) { self.canon_height = height; - // Evict entries from the front (oldest) whose block number is at or - // below the new canonical height. - while self + // Evict all entries whose height is at or below the new canonical height. + let retained: Vec<_> = self .cache - .front() - .is_some_and(|entry| entry.block_number().is_some_and(|n| n <= height)) - { - self.cache.dequeue(); + .drain() + .filter(|entry| entry.block_number().map_or(true, |n| n > height)) + .collect(); + for entry in retained { + self.cache.enqueue(entry); } } pub fn handle_flashblock(&mut self, flashblock: OpFlashblockPayload) -> eyre::Result<()> { - if flashblock.block_number() <= self.canon_height { - debug!( - target: "flashblocks", - flashblock_number = flashblock.block_number(), - canon_height = self.canon_height, - "Received old flashblock behind canonical height, skip adding", - ); - return Ok(()); + let incoming_height = flashblock.block_number(); + if incoming_height <= self.canon_height { + return Err(eyre::eyre!( + "Received old flashblock behind canonical height, skip adding to raw cache: flashblock_number={}, canon_height={}", + incoming_height, + self.canon_height, + )); } // Search for an existing entry matching this payload_id. @@ -87,10 +93,20 @@ impl RawFlashblocksCacheInner { // when the cache is full. let mut entry = RawFlashblocksEntry::new(); entry.insert_flashblock(flashblock)?; - self.cache.push(entry); + self.cache.enqueue(entry); } Ok(()) } + + fn take_buildable_for_height( + &self, + height: u64, + ) -> Option>>>> { + self.cache + .iter() + .find(|entry| entry.block_number() == Some(height)) + .and_then(|entry| entry.to_buildable_args()) + } } /// Raw flashblocks sequence keeps track of the flashblocks sequence based on their @@ -117,15 +133,13 @@ impl RawFlashblocksEntry { /// Inserts a flashblock into the sequence. fn insert_flashblock(&mut self, flashblock: OpFlashblockPayload) -> eyre::Result<()> { if !self.can_accept(&flashblock) { - warn!( - target: "flashblocks", - incoming_id = ?flashblock.payload_id, - current_id = ?self.payload_id(), - incoming_height = %flashblock.block_number(), - current_height = ?self.block_number(), - "Incoming flashblock failed to be accepted into the sequence, possible re-org detected", - ); - return Err(eyre::eyre!("incoming flashblock failed to be accepted into the sequence, possible re-org detected")); + return Err(eyre::eyre!( + "Incoming flashblock failed to be accepted into the sequence, possible re-org detected: incoming_id={:?}, current_id={:?}, incoming_height={}, current_height={:?}", + flashblock.payload_id, + self.payload_id(), + flashblock.block_number(), + self.block_number(), + )); } if flashblock.index == 0 { @@ -182,6 +196,31 @@ impl RawFlashblocksEntry { self.recovered_transactions_by_index.values().flatten().map(|tx| *tx.tx_hash()).collect() } + fn base(&self) -> Option<&OpFlashblockPayloadBase> { + self.payloads.get(&0)?.base.as_ref() + } + + fn withdrawals_at(&self, index: u64) -> Vec { + self.payloads.get(&index).map(|p| p.diff.withdrawals.clone()).unwrap_or_default() + } + + fn transactions_up_to(&self, up_to: u64) -> Vec>> { + self.recovered_transactions_by_index + .range(..=up_to) + .flat_map(|(_, txs)| txs.iter().cloned()) + .collect() + } + + fn to_buildable_args(&self) -> Option>>>> { + let best_revision = self.get_best_revision()?; + Some(BuildArgs { + base: self.base()?.clone(), + transactions: self.transactions_up_to(best_revision), + withdrawals: self.withdrawals_at(best_revision), + last_flashblock_index: best_revision, + }) + } + #[cfg(test)] fn transaction_count(&self) -> usize { self.recovered_transactions_by_index.values().map(Vec::len).sum() @@ -476,7 +515,7 @@ mod tests { fn test_raw_flashblocks_cache_handle_flashblock_inserts_via_arc_rwlock() { let factory = TestFlashBlockFactory::new(); let fb0 = factory.flashblock_at(0).build(); - let mut cache = RawFlashblocksCache::::new(); + let cache = RawFlashblocksCache::::new(); let result = cache.handle_flashblock(fb0); assert!(result.is_ok(), "handle_flashblock via Arc wrapper should succeed"); diff --git a/crates/flashblocks/src/execution/mod.rs b/crates/flashblocks/src/execution/mod.rs index 39bcef36..10a7352a 100644 --- a/crates/flashblocks/src/execution/mod.rs +++ b/crates/flashblocks/src/execution/mod.rs @@ -1,6 +1,8 @@ pub(crate) mod assemble; pub(crate) mod validator; +pub(crate) use validator::FlashblockSequenceValidator; + use alloy_eips::eip4895::Withdrawal; use op_alloy_rpc_types_engine::OpFlashblockPayloadBase; @@ -15,13 +17,12 @@ pub(crate) struct BuildArgs { pub(crate) base: OpFlashblockPayloadBase, pub(crate) transactions: I, pub(crate) withdrawals: Vec, - pub(crate) start_flashblock_index: u64, pub(crate) last_flashblock_index: u64, } /// Cached prefix execution data used to resume canonical builds. #[derive(Debug, Clone, Default)] -pub(crate) struct PrefixExecutionMeta { +pub struct PrefixExecutionMeta { /// Cached reads from execution for reuse. pub cached_reads: CachedReads, /// Number of leading transactions covered by cached execution. @@ -46,7 +47,7 @@ enum StateRootStrategy { } /// Receipt requirements for cache-resume flow. -pub(crate) trait FlashblockReceipt: Clone { +pub trait FlashblockReceipt: Clone { /// Adds `gas_offset` to each receipt's `cumulative_gas_used`. fn add_cumulative_gas_offset(receipts: &mut [Self], gas_offset: u64); } @@ -66,7 +67,7 @@ impl FlashblockReceipt for OpReceipt { /// Trait alias for the bounds required on a provider factory to create an /// [`OverlayStateProviderFactory`] that supports parallel and serial state /// root computation. -pub(crate) trait OverlayProviderFactory: +pub trait OverlayProviderFactory: DatabaseProviderFactory< Provider: StageCheckpointReader + PruneCheckpointReader diff --git a/crates/flashblocks/src/execution/validator.rs b/crates/flashblocks/src/execution/validator.rs index eb667d27..dc28ea26 100644 --- a/crates/flashblocks/src/execution/validator.rs +++ b/crates/flashblocks/src/execution/validator.rs @@ -145,8 +145,8 @@ where N::Block: From>, { // Pre-validate incoming flashblocks sequence - let pending_sequence = self - .prevalidate_incoming_sequence(args.base.block_number, args.start_flashblock_index)?; + let pending_sequence = + self.prevalidate_incoming_sequence(args.base.block_number, args.last_flashblock_index)?; let parent_hash = args.base.parent_hash; let block_transactions: Vec<_> = args.transactions.into_iter().collect(); @@ -434,7 +434,7 @@ where fn prevalidate_incoming_sequence( &self, incoming_block_number: u64, - incoming_index: u64, + incoming_last_index: u64, ) -> eyre::Result>> { if let Some(pending) = self.flashblocks_state.get_pending_sequence() { // Validate incoming height continuity @@ -449,18 +449,14 @@ where if pending_height == incoming_block_number { // Validate states of last executed flashblock index let last_index = pending.prefix_execution_meta.last_flashblock_index; - if last_index.saturating_add(1) != incoming_index { + if last_index >= incoming_last_index { return Err(eyre::eyre!( - "flashblock index mismatch: incoming={incoming_index}, pending={last_index}" + "flashblock index mismatch: incoming={incoming_last_index}, pending={incoming_last_index}" )); } return Ok(Some(pending)); - } else if incoming_index != 0 { - // Optimistic fresh build. Validate that build is starting from index = 0. - return Err(eyre::eyre!( - "flashblock index mismatch: should start from index 0 but incoming={incoming_index}" - )); } + // Optimistic fresh build return Ok(None); } // No pending sequence initialized yet. Validate with canonical chainstate height diff --git a/crates/flashblocks/src/lib.rs b/crates/flashblocks/src/lib.rs index 20bd2ec7..4628cfda 100644 --- a/crates/flashblocks/src/lib.rs +++ b/crates/flashblocks/src/lib.rs @@ -2,16 +2,18 @@ mod cache; mod execution; -pub(crate) mod handle; -mod service; +mod persist; +mod state; mod subscription; mod ws; +pub mod service; + #[cfg(test)] mod test_utils; pub use cache::{CachedTxInfo, FlashblockStateCache, PendingSequence}; -pub use service::FlashblocksRpcService; +pub use service::{FlashblocksPersistCtx, FlashblocksRpcCtx, FlashblocksRpcService}; pub use subscription::FlashblocksPubSub; pub use ws::WsFlashBlockStream; diff --git a/crates/flashblocks/src/handle.rs b/crates/flashblocks/src/persist.rs similarity index 100% rename from crates/flashblocks/src/handle.rs rename to crates/flashblocks/src/persist.rs diff --git a/crates/flashblocks/src/service.rs b/crates/flashblocks/src/service.rs index 975b0846..baa76ca3 100644 --- a/crates/flashblocks/src/service.rs +++ b/crates/flashblocks/src/service.rs @@ -1,14 +1,31 @@ use crate::{ - handle::{handle_persistence, handle_relay_flashblocks}, + cache::{raw::RawFlashblocksCache, FlashblockStateCache}, + execution::{FlashblockReceipt, FlashblockSequenceValidator, OverlayProviderFactory}, + persist::{handle_persistence, handle_relay_flashblocks}, + state::{handle_canonical_stream, handle_execution_tasks, handle_incoming_flashblocks}, ReceivedFlashblocksRx, }; -use futures_util::{FutureExt, Stream, StreamExt}; -use std::{net::SocketAddr, sync::Arc, time::Duration}; -use tokio::time::sleep; +use futures_util::Stream; +use std::{ + collections::BTreeSet, + net::SocketAddr, + sync::{Arc, Condvar, Mutex, OnceLock}, +}; +use tokio::sync::broadcast::Sender; use tracing::*; -use op_alloy_rpc_types_engine::OpFlashblockPayload; +use alloy_eips::eip2718::Encodable2718; +use op_alloy_rpc_types_engine::{OpFlashblockPayload, OpFlashblockPayloadBase}; + +use reth_chain_state::CanonStateNotificationStream; +use reth_engine_primitives::TreeConfig; +use reth_evm::ConfigureEvm; use reth_node_core::dirs::{ChainPath, DataDirPath}; +use reth_optimism_forks::OpHardforks; +use reth_primitives_traits::NodePrimitives; +use reth_provider::{ + BlockReader, HashedPostStateProvider, HeaderProvider, StateProviderFactory, StateReader, +}; use reth_tasks::TaskExecutor; use xlayer_builder::{ @@ -17,62 +34,88 @@ use xlayer_builder::{ metrics::{tokio::FlashblocksTaskMetrics, BuilderMetrics}, }; -const CONNECTION_BACKOUT_PERIOD: Duration = Duration::from_secs(5); +pub const EXECUTION_TASK_QUEUE_CAPACITY: usize = 5; -pub struct FlashblocksRpcService { - /// Incoming flashblock stream. - incoming_flashblock_rx: S, - /// Broadcast channel to forward received flashblocks from the subscription. - received_flashblocks_tx: tokio::sync::broadcast::Sender>, +pub type ExecutionTaskQueue = Arc<(Mutex>, Condvar)>; + +/// Context for flashblocks RPC state handles. +pub struct FlashblocksRpcCtx { + /// Canonical chainstate provider. + pub provider: Provider, + /// Canonical state notification stream. + pub canon_state_rx: CanonStateNotificationStream, + /// Evm config for the sequence validator. + pub evm_config: EvmConfig, + /// Chain specs for the sequence validator. + pub chain_spec: Arc, + /// Node engine tree configuration for the sequence validator. + pub tree_config: TreeConfig, +} + +/// Context for handling flashblocks persistence and relaying. +pub struct FlashblocksPersistCtx { + /// Data directory for flashblocks persistence. + pub datadir: ChainPath, + /// Whether to relay flashblocks to the subscribers. + pub relay_flashblocks: bool, +} + +pub struct FlashblocksRpcService +where + N: NodePrimitives, + EvmConfig: ConfigureEvm, + ChainSpec: OpHardforks, +{ + /// Flashblock configurations. + args: FlashblocksArgs, + /// Flashblocks state cache (shared with RPC handlers). + flashblocks_state: FlashblockStateCache, + /// Flashblocks RPC context. + rpc_ctx: FlashblocksRpcCtx, + /// Flashblocks persist context. + persist_ctx: FlashblocksPersistCtx, /// Task executor. task_executor: TaskExecutor, - /// Flashblocks websocket publisher for relaying flashblocks to subscribers. - ws_pub: Arc, - /// Whether to relay flashblocks to the subscribers. - relay_flashblocks: bool, - /// Payload events sender for forwarding locally-built payloads to the engine state tree. - events_sender: Option, - /// Data directory for flashblocks persistence. - datadir: ChainPath, + /// Broadcast channel to forward received flashblocks from the subscription. + received_flashblocks_tx: Sender>, } -impl FlashblocksRpcService +impl FlashblocksRpcService where - S: Stream> + Unpin + 'static, + N: NodePrimitives, + N::Receipt: FlashblockReceipt, + N::SignedTx: Encodable2718, + N::Block: From>, + EvmConfig: ConfigureEvm + Unpin> + + Send + + 'static, + Provider: StateProviderFactory + + HeaderProvider
::BlockHeader> + + OverlayProviderFactory + + BlockReader + + StateReader + + HashedPostStateProvider + + Unpin + + Clone + + Send + + 'static, + ChainSpec: OpHardforks + Send + Sync + 'static, { pub fn new( - task_executor: TaskExecutor, - incoming_flashblock_rx: S, args: FlashblocksArgs, - relay_flashblocks: bool, - events_sender: Option, - datadir: ChainPath, - ) -> Result { + flashblocks_state: FlashblockStateCache, + task_executor: TaskExecutor, + rpc_ctx: FlashblocksRpcCtx, + persist_ctx: FlashblocksPersistCtx, + ) -> eyre::Result { let (received_flashblocks_tx, _) = tokio::sync::broadcast::channel(128); - - // Initialize ws publisher for relaying flashblocks - let ws_addr = SocketAddr::new(args.flashblocks_addr.parse()?, args.flashblocks_port); - let metrics = Arc::new(BuilderMetrics::default()); - let task_metrics = Arc::new(FlashblocksTaskMetrics::new()); - let ws_pub = Arc::new( - WebSocketPublisher::new( - ws_addr, - metrics, - &task_metrics.websocket_publisher, - args.ws_subscriber_limit, - ) - .map_err(|e| eyre::eyre!("Failed to create WebSocket publisher: {e}"))?, - ); - info!(target: "flashblocks", "WebSocket publisher initialized at {}", ws_addr); - Ok(Self { - incoming_flashblock_rx, - received_flashblocks_tx, + args, + flashblocks_state, + rpc_ctx, + persist_ctx, task_executor, - ws_pub, - relay_flashblocks, - events_sender, - datadir, + received_flashblocks_tx, }) } @@ -81,10 +124,10 @@ where self.received_flashblocks_tx.subscribe() } - pub fn spawn(&self) { - debug!(target: "flashblocks", "Initializing flashblocks service"); + pub fn spawn_persistence(&self) -> eyre::Result<()> { // Spawn persistence handle - let datadir = self.datadir.clone(); + debug!(target: "flashblocks", "Initializing flashblocks persistence"); + let datadir = self.persist_ctx.datadir.clone(); let rx = self.subscribe_received_flashblocks(); self.task_executor.spawn_critical_task( "xlayer-flashblocks-persistence", @@ -92,11 +135,24 @@ where handle_persistence(rx, datadir).await; }), ); - // Spawn relayer handle - if self.relay_flashblocks { + if self.persist_ctx.relay_flashblocks { + let ws_addr = + SocketAddr::new(self.args.flashblocks_addr.parse()?, self.args.flashblocks_port); + let metrics = Arc::new(BuilderMetrics::default()); + let task_metrics = Arc::new(FlashblocksTaskMetrics::new()); + let ws_pub = Arc::new( + WebSocketPublisher::new( + ws_addr, + metrics, + &task_metrics.websocket_publisher, + self.args.ws_subscriber_limit, + ) + .map_err(|e| eyre::eyre!("Failed to create WebSocket publisher: {e}"))?, + ); + info!(target: "flashblocks", "WebSocket publisher initialized at {ws_addr}"); + let rx = self.subscribe_received_flashblocks(); - let ws_pub = self.ws_pub.clone(); self.task_executor.spawn_critical_task( "xlayer-flashblocks-publish", Box::pin(async move { @@ -104,60 +160,100 @@ where }), ); } + Ok(()) } - /// Contains the main logic for processing raw incoming flashblocks, and updating the - /// flashblocks state cache layer. The logic pipeline is as follows: - /// 1. Notifies subscribers - /// 2. Inserts into the raw flashblocks cache - pub async fn handle_flashblocks(&mut self) { - loop { - tokio::select! { - // Event 1: New flashblock arrives (batch process all ready flashblocks) - result = self.incoming_flashblock_rx.next() => { - match result { - Some(Ok(flashblock)) => { - // Process first flashblock - self.process_flashblock(flashblock); - - // Batch process all other immediately available flashblocks - while let Some(result) = self.incoming_flashblock_rx.next().now_or_never().flatten() { - match result { - Ok(fb) => self.process_flashblock(fb), - Err(err) => warn!(target: "flashblocks", %err, "Error receiving flashblock"), - } - } - } - Some(Err(err)) => { - warn!( - target: "flashblocks", - %err, - retry_period = CONNECTION_BACKOUT_PERIOD.as_secs(), - "Error receiving flashblock" - ); - sleep(CONNECTION_BACKOUT_PERIOD).await; - } - None => { - warn!(target: "flashblocks", "Flashblock stream ended"); - break; - } - } - } - } - } - } + pub fn spawn_rpc(self, incoming_rx: S) + where + S: Stream> + Unpin + Send + 'static, + { + debug!(target: "flashblocks", "Initializing flashblocks rpc"); + let raw_cache = Arc::new(RawFlashblocksCache::new()); + let validator = FlashblockSequenceValidator::new( + self.rpc_ctx.evm_config, + self.rpc_ctx.provider, + self.rpc_ctx.chain_spec, + self.flashblocks_state.clone(), + self.task_executor.clone(), + self.rpc_ctx.tree_config, + ); + let task_queue = Arc::new((Mutex::new(BTreeSet::new()), Condvar::new())); + + // Spawn incoming raw flashblocks handle. + let received_tx = self.received_flashblocks_tx.clone(); + self.task_executor.spawn_critical_task( + "xlayer-flashblocks-payload", + Box::pin(handle_incoming_flashblocks::( + incoming_rx, + received_tx, + raw_cache.clone(), + task_queue.clone(), + )), + ); - /// Processes a single flashblock: notifies subscribers, and inserts into - /// the raw flashblocks cache. - fn process_flashblock(&mut self, flashblock: OpFlashblockPayload) { - self.notify_received_flashblock(&flashblock); - // TODO: Insert into the raw flashblocks cache + // Spawn the flashblocks sequence execution task handle on a dedicated blocking thread. + let cache = raw_cache.clone(); + self.task_executor.spawn_critical_blocking_task( + "xlayer-flashblocks-execution", + async move { + handle_execution_tasks::( + validator, cache, task_queue, + ); + }, + ); + + // Spawn the canonical stream handle. + self.task_executor.spawn_critical_task( + "xlayer-flashblocks-canonical", + Box::pin(handle_canonical_stream::( + self.rpc_ctx.canon_state_rx, + self.flashblocks_state, + raw_cache, + )), + ); } - /// Notifies all subscribers about the received flashblock. - fn notify_received_flashblock(&self, flashblock: &OpFlashblockPayload) { - if self.received_flashblocks_tx.receiver_count() > 0 { - let _ = self.received_flashblocks_tx.send(Arc::new(flashblock.clone())); + pub fn spawn_prewarm(&self, events_sender: Arc>) + where + N: NodePrimitives< + Block = ::Block, + Receipt = ::Receipt, + >, + { + let mut pending_rx = self.flashblocks_state.subscribe_pending_sequence(); + if let Some(payload_events_sender) = events_sender.get().cloned() { + self.task_executor.spawn_critical_task( + "xlayer-flashblocks-prewarm", + Box::pin(async move { + use either::Either; + use reth_optimism_payload_builder::OpBuiltPayload; + use reth_optimism_primitives::OpPrimitives; + use reth_payload_builder_primitives::Events; + + while pending_rx.changed().await.is_ok() { + let Some(pending_sequence) = pending_rx.borrow_and_update().clone() else { + continue; + }; + let executed = &pending_sequence.executed_block; + let block = executed.recovered_block.clone_sealed_block(); + let trie_data = executed.trie_data(); + let built = + reth_payload_primitives::BuiltPayloadExecutedBlock:: { + recovered_block: executed.recovered_block.clone(), + execution_output: executed.execution_output.clone(), + hashed_state: Either::Right(trie_data.hashed_state), + trie_updates: Either::Right(trie_data.trie_updates), + }; + let payload = OpBuiltPayload::::new( + reth_payload_builder::PayloadId::default(), + Arc::new(block), + alloy_primitives::U256::ZERO, + Some(built), + ); + let _ = payload_events_sender.send(Events::BuiltPayload(payload)); + } + }), + ); } } } diff --git a/crates/flashblocks/src/state.rs b/crates/flashblocks/src/state.rs new file mode 100644 index 00000000..25a14da9 --- /dev/null +++ b/crates/flashblocks/src/state.rs @@ -0,0 +1,222 @@ +use crate::{ + cache::RawFlashblocksCache, + execution::validator::FlashblockSequenceValidator, + execution::{FlashblockReceipt, OverlayProviderFactory}, + service::{ExecutionTaskQueue, EXECUTION_TASK_QUEUE_CAPACITY}, + FlashblockStateCache, +}; +use futures_util::{FutureExt, Stream, StreamExt}; +use std::{sync::Arc, time::Duration}; +use tokio::{sync::broadcast::Sender, time::sleep}; + +use tracing::*; + +use alloy_consensus::BlockHeader; +use alloy_eips::eip2718::Encodable2718; +use op_alloy_rpc_types_engine::{OpFlashblockPayload, OpFlashblockPayloadBase}; + +use reth_chain_state::CanonStateNotificationStream; +use reth_evm::ConfigureEvm; +use reth_optimism_forks::OpHardforks; +use reth_primitives_traits::NodePrimitives; +use reth_provider::{ + BlockReader, HashedPostStateProvider, HeaderProvider, StateProviderFactory, StateReader, +}; + +const CONNECTION_BACKOUT_PERIOD: Duration = Duration::from_secs(5); + +pub async fn handle_incoming_flashblocks( + mut incoming_rx: S, + received_tx: Sender>, + raw_cache: Arc>, + task_queue: ExecutionTaskQueue, +) where + S: Stream> + Unpin + Send + 'static, + N: NodePrimitives, +{ + info!(target: "flashblocks", "Flashblocks raw handle started"); + loop { + match incoming_rx.next().await { + Some(Ok(payload)) => { + if let Err(err) = + process_flashblock_payload::(payload, &received_tx, &raw_cache, &task_queue) + { + warn!( + target: "flashblocks", + %err, + "Error receiving flashblock payload" + ); + continue; + }; + + // Batch process all other immediately available flashblocks + while let Some(result) = incoming_rx.next().now_or_never().flatten() { + match result { + Ok(payload) => { + if let Err(err) = process_flashblock_payload::( + payload, + &received_tx, + &raw_cache, + &task_queue, + ) { + warn!( + target: "flashblocks", + %err, + "Error receiving flashblock payload" + ); + continue; + }; + } + Err(err) => { + warn!(target: "flashblocks", %err, "Error receiving flashblock"); + continue; + } + } + } + // Schedule executor + task_queue.1.notify_one(); + } + Some(Err(err)) => { + warn!( + target: "flashblocks:handle", + %err, + retry_period = CONNECTION_BACKOUT_PERIOD.as_secs(), + "Error receiving flashblock" + ); + sleep(CONNECTION_BACKOUT_PERIOD).await; + } + None => { + break; + } + } + } + warn!(target: "flashblocks:handle", "Flashblock payload handle ended"); +} + +fn process_flashblock_payload( + flashblock: OpFlashblockPayload, + received_tx: &tokio::sync::broadcast::Sender>, + raw_cache: &RawFlashblocksCache, + task_queue: &ExecutionTaskQueue, +) -> eyre::Result<()> { + if received_tx.receiver_count() > 0 { + let _ = received_tx.send(Arc::new(flashblock.clone())); + } + // Insert into raw cache + let height = flashblock.block_number(); + raw_cache.handle_flashblock(flashblock)?; + + // Enqueue to execution tasks + let mut queue = + task_queue.0.lock().map_err(|e| eyre::eyre!("Task queue lock poisoned: {e}"))?; + if !queue.contains(&height) && queue.len() >= EXECUTION_TASK_QUEUE_CAPACITY { + // Queue is full — evict the lowest block height before inserting. + let evicted = queue.pop_first(); + warn!( + target: "flashblocks", + ?evicted, + new_height = height, + "Execution task queue full, evicting lowest height" + ); + } + queue.insert(height); + Ok(()) +} + +pub fn handle_execution_tasks( + mut validator: FlashblockSequenceValidator, + raw_cache: Arc>, + task_queue: ExecutionTaskQueue, +) where + N: NodePrimitives, + N::Receipt: FlashblockReceipt, + N::SignedTx: Encodable2718, + N::Block: From>, + EvmConfig: ConfigureEvm + Unpin> + + Send + + 'static, + Provider: StateProviderFactory + + HeaderProvider
::BlockHeader> + + OverlayProviderFactory + + BlockReader + + StateReader + + HashedPostStateProvider + + Unpin + + Clone + + Send + + 'static, + ChainSpec: OpHardforks + Send + Sync + 'static, +{ + info!(target: "flashblocks", "Flashblocks execution handle started"); + let (queue_mutex, condvar) = &*task_queue; + loop { + let execute_height = { + let guard = match queue_mutex.lock() { + Ok(g) => g, + Err(err) => { + warn!(target: "flashblocks", %err, "Task queue mutex poisoned, retrying"); + continue; + } + }; + let mut queue = match condvar.wait_while(guard, |q| q.is_empty()) { + Ok(g) => g, + Err(err) => { + warn!(target: "flashblocks", %err, "Task queue condvar wait poisoned, retrying"); + continue; + } + }; + queue.pop_first().unwrap() + }; + + // Extract buildable sequence for this height from raw cache + let Some(args) = raw_cache.take_buildable_for_height(execute_height) else { + trace!( + target: "flashblocks", + execute_height = execute_height, + "No buildable args for excution task height, skipping" + ); + continue; + }; + debug!( + target: "flashblocks", + execute_height = execute_height, + last_index = args.last_flashblock_index, + "Executing flashblocks sequence" + ); + + if let Err(err) = validator.execute_sequence(args) { + warn!( + target: "flashblocks", + %err, + execute_height = execute_height, + "Validator failed to execute flashblocks sequence" + ); + } + } +} + +pub async fn handle_canonical_stream( + mut canon_rx: CanonStateNotificationStream, + flashblocks_state: FlashblockStateCache, + raw_cache: Arc>, +) { + info!(target: "flashblocks", "Canonical state handler started"); + while let Some(notification) = canon_rx.next().await { + let tip = notification.tip(); + let block_hash = tip.hash(); + let block_number = tip.number(); + let is_reorg = notification.reverted().is_some(); + + flashblocks_state.handle_canonical_block((block_number, block_hash), is_reorg); + raw_cache.handle_canonical_height(block_number); + + debug!( + target: "flashblocks", + block_number, + ?block_hash, + is_reorg, + "Canonical block processed" + ); + } + warn!(target: "flashblocks", "Canonical state stream ended"); +} From c34420609b1f6ee8266d4c4735d773346561fa25 Mon Sep 17 00:00:00 2001 From: Niven Date: Wed, 25 Mar 2026 09:31:55 +0800 Subject: [PATCH 48/76] refactor(flashblocks): flush execution task queue on state cache reset MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Return a boolean from handle_canonical_block to signal when a flush occurred, then drain the execution task queue so stale heights built against invalidated state are discarded. Also rename raw cache methods to try_* for clarity and remove unused transactions/tx_hashes helpers. 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude Opus 4.6 (1M context) --- crates/flashblocks/src/cache/mod.rs | 6 ++- crates/flashblocks/src/cache/raw.rs | 68 ++++++----------------------- crates/flashblocks/src/service.rs | 40 ++++++++++++++++- crates/flashblocks/src/state.rs | 9 ++-- 4 files changed, 63 insertions(+), 60 deletions(-) diff --git a/crates/flashblocks/src/cache/mod.rs b/crates/flashblocks/src/cache/mod.rs index 19be2a49..45d4cc97 100644 --- a/crates/flashblocks/src/cache/mod.rs +++ b/crates/flashblocks/src/cache/mod.rs @@ -267,7 +267,7 @@ impl FlashblockStateCache { /// It also detects chainstate re-orgs (set with re-org arg flag) and flashblocks /// state cache pollution. By default once error is detected, we will automatically /// flush the flashblocks state cache. - pub fn handle_canonical_block(&self, canon_info: (u64, B256), reorg: bool) { + pub fn handle_canonical_block(&self, canon_info: (u64, B256), reorg: bool) -> bool { debug!( target: "flashblocks", canonical_height = canon_info.0, @@ -387,7 +387,7 @@ impl FlashblockStateCacheInner { Ok(()) } - fn handle_canonical_block(&mut self, canon_info: (u64, B256), reorg: bool) { + fn handle_canonical_block(&mut self, canon_info: (u64, B256), reorg: bool) -> bool { let pending_stale = self.pending_cache.as_ref().is_some_and(|p| p.get_height() <= canon_info.0); if pending_stale || reorg { @@ -400,6 +400,7 @@ impl FlashblockStateCacheInner { "Reorg or pending stale detected on handle canonical block", ); self.flush(); + return true; } else { debug!( target: "flashblocks", @@ -413,6 +414,7 @@ impl FlashblockStateCacheInner { // Update state heights self.canon_info = canon_info; self.confirm_height = self.confirm_height.max(canon_info.0); + false } pub fn get_confirmed_block(&self) -> Option> { diff --git a/crates/flashblocks/src/cache/raw.rs b/crates/flashblocks/src/cache/raw.rs index b18fac16..dcae0138 100644 --- a/crates/flashblocks/src/cache/raw.rs +++ b/crates/flashblocks/src/cache/raw.rs @@ -4,11 +4,10 @@ use ringbuffer::{AllocRingBuffer, RingBuffer}; use std::{collections::BTreeMap, sync::Arc}; use alloy_eips::{eip2718::WithEncoded, eip4895::Withdrawal}; -use alloy_primitives::B256; use alloy_rpc_types_engine::PayloadId; use op_alloy_rpc_types_engine::{OpFlashblockPayload, OpFlashblockPayloadBase}; -use reth_primitives_traits::{transaction::TxHashRef, Recovered, SignedTransaction}; +use reth_primitives_traits::{Recovered, SignedTransaction}; const MAX_RAW_CACHE_SIZE: usize = 10; @@ -40,11 +39,11 @@ impl RawFlashblocksCache { self.inner.write().handle_flashblock(flashblock) } - pub(crate) fn take_buildable_for_height( + pub(crate) fn try_get_buildable_args( &self, height: u64, ) -> Option>>>> { - self.inner.read().take_buildable_for_height(height) + self.inner.read().try_get_buildable_args(height) } } @@ -98,14 +97,14 @@ impl RawFlashblocksCacheInner { Ok(()) } - fn take_buildable_for_height( + fn try_get_buildable_args( &self, height: u64, ) -> Option>>>> { self.cache .iter() .find(|entry| entry.block_number() == Some(height)) - .and_then(|entry| entry.to_buildable_args()) + .and_then(|entry| entry.try_to_buildable_args()) } } @@ -162,7 +161,7 @@ impl RawFlashblocksEntry { && self.payloads.get(&flashblock.index).is_none() } - fn get_best_revision(&self) -> Option { + fn try_get_best_revision(&self) -> Option { if !self.has_base || self.payloads.is_empty() { return None; } @@ -188,14 +187,6 @@ impl RawFlashblocksEntry { Some(self.payloads.values().next()?.payload_id) } - fn transactions(&self) -> Vec>> { - self.recovered_transactions_by_index.values().flatten().cloned().collect() - } - - fn tx_hashes(&self) -> Vec { - self.recovered_transactions_by_index.values().flatten().map(|tx| *tx.tx_hash()).collect() - } - fn base(&self) -> Option<&OpFlashblockPayloadBase> { self.payloads.get(&0)?.base.as_ref() } @@ -211,8 +202,8 @@ impl RawFlashblocksEntry { .collect() } - fn to_buildable_args(&self) -> Option>>>> { - let best_revision = self.get_best_revision()?; + fn try_to_buildable_args(&self) -> Option>>>> { + let best_revision = self.try_get_best_revision()?; Some(BuildArgs { base: self.base()?.clone(), transactions: self.transactions_up_to(best_revision), @@ -235,10 +226,6 @@ mod tests { type TestRawCache = RawFlashblocksCacheInner; - // ===== RawFlashblocksEntry tests via RawFlashblocksCacheInner ===== - - // --- can_accept --- - #[test] fn test_raw_entry_can_accept_first_flashblock_on_empty_entry() { // Arrange @@ -306,7 +293,7 @@ mod tests { cache.handle_flashblock(fb1).expect("fb1 insert"); let entry = cache.cache.iter().next().expect("entry should exist"); - let best = entry.get_best_revision(); + let best = entry.try_get_best_revision(); assert!(best.is_none(), "get_best_revision should return None without base (index 0)"); } @@ -318,7 +305,7 @@ mod tests { let mut cache = TestRawCache::new(); cache.handle_flashblock(fb0).expect("fb0 insert"); let entry = cache.cache.iter().next().expect("entry should exist"); - let best = entry.get_best_revision(); + let best = entry.try_get_best_revision(); assert_eq!(best, Some(0), "only index 0 → best revision is 0"); } @@ -336,7 +323,7 @@ mod tests { cache.handle_flashblock(fb2).expect("fb2"); cache.handle_flashblock(fb3).expect("fb3"); let entry = cache.cache.iter().next().expect("entry should exist"); - let best = entry.get_best_revision(); + let best = entry.try_get_best_revision(); assert_eq!(best, Some(3), "consecutive 0..3 → best revision 3"); } @@ -353,7 +340,7 @@ mod tests { cache.handle_flashblock(fb1).expect("fb1"); cache.handle_flashblock(fb3).expect("fb3 (gap after index 1)"); let entry = cache.cache.iter().next().expect("entry should exist"); - let best = entry.get_best_revision(); + let best = entry.try_get_best_revision(); assert_eq!(best, Some(1), "gap between 1 and 3 → best revision is 1"); } @@ -529,7 +516,7 @@ mod tests { let mut cache = TestRawCache::new(); cache.handle_flashblock(fb1).expect("fb1 insert"); let entry = cache.cache.iter().next().expect("entry should exist"); - let best = entry.get_best_revision(); + let best = entry.try_get_best_revision(); // Assert: no base → None, even though index 1 exists assert!(best.is_none(), "no base means get_best_revision must return None"); } @@ -548,7 +535,7 @@ mod tests { cache.handle_flashblock(fb0).expect("fb0"); cache.handle_flashblock(fb2).expect("fb2"); let entry = cache.cache.iter().next().expect("entry should exist"); - let best = entry.get_best_revision(); + let best = entry.try_get_best_revision(); // Assert: gap immediately after base (index 1 missing) → best revision is 0 assert_eq!(best, Some(0), "gap at index 1 means best revision stays at 0"); } @@ -605,33 +592,6 @@ mod tests { assert_eq!(entry.payloads.len(), 4, "entry should contain 4 payloads"); } - #[test] - fn test_raw_entry_transactions_returns_empty_vec_on_empty_flashblock() { - let factory = TestFlashBlockFactory::new(); - let fb0 = factory.flashblock_at(0).build(); - let mut cache = TestRawCache::new(); - - cache.handle_flashblock(fb0).expect("fb0 insert"); - let entry = cache.cache.iter().next().expect("entry should exist"); - let txs = entry.transactions(); - assert!(txs.is_empty(), "flashblock with no txs should return empty transactions vec"); - } - - #[test] - fn test_raw_entry_tx_hashes_consistent_with_transaction_count() { - let factory = TestFlashBlockFactory::new(); - let fb0 = factory.flashblock_at(0).build(); - let mut cache = TestRawCache::new(); - - cache.handle_flashblock(fb0).expect("fb0 insert"); - let entry = cache.cache.iter().next().expect("entry should exist"); - assert_eq!( - entry.tx_hashes().len(), - entry.transaction_count(), - "tx_hashes length should match transaction_count" - ); - } - #[test] fn test_flashblock_serde_roundtrip() { let raw = r#"{ diff --git a/crates/flashblocks/src/service.rs b/crates/flashblocks/src/service.rs index baa76ca3..6d4aece3 100644 --- a/crates/flashblocks/src/service.rs +++ b/crates/flashblocks/src/service.rs @@ -38,6 +38,42 @@ pub const EXECUTION_TASK_QUEUE_CAPACITY: usize = 5; pub type ExecutionTaskQueue = Arc<(Mutex>, Condvar)>; +/// Extension trait for [`ExecutionTaskQueue`] providing a flush operation. +pub trait ExecutionTaskQueueFlush { + /// Clears all pending execution tasks from the queue. + /// + /// Called when a flush is detected on the flashblocks state layer (reorg or stale + /// pending) to drain any queued block heights that were built against now-invalidated + /// state. The execution worker will re-enter its wait loop and pick up fresh tasks + /// from incoming flashblocks after this call. + fn flush(&self); +} + +impl ExecutionTaskQueueFlush for ExecutionTaskQueue { + fn flush(&self) { + match self.0.lock() { + Ok(mut queue) => { + let flushed = queue.len(); + queue.clear(); + if flushed > 0 { + warn!( + target: "flashblocks", + flushed, + "Execution task queue flushed on state reset" + ); + } + } + Err(err) => { + warn!( + target: "flashblocks", + %err, + "Failed to flush execution task queue: mutex poisoned" + ); + } + } + } +} + /// Context for flashblocks RPC state handles. pub struct FlashblocksRpcCtx { /// Canonical chainstate provider. @@ -193,11 +229,12 @@ where // Spawn the flashblocks sequence execution task handle on a dedicated blocking thread. let cache = raw_cache.clone(); + let queue = task_queue.clone(); self.task_executor.spawn_critical_blocking_task( "xlayer-flashblocks-execution", async move { handle_execution_tasks::( - validator, cache, task_queue, + validator, cache, queue, ); }, ); @@ -209,6 +246,7 @@ where self.rpc_ctx.canon_state_rx, self.flashblocks_state, raw_cache, + task_queue, )), ); } diff --git a/crates/flashblocks/src/state.rs b/crates/flashblocks/src/state.rs index 25a14da9..9886e766 100644 --- a/crates/flashblocks/src/state.rs +++ b/crates/flashblocks/src/state.rs @@ -2,7 +2,7 @@ use crate::{ cache::RawFlashblocksCache, execution::validator::FlashblockSequenceValidator, execution::{FlashblockReceipt, OverlayProviderFactory}, - service::{ExecutionTaskQueue, EXECUTION_TASK_QUEUE_CAPACITY}, + service::{ExecutionTaskQueue, ExecutionTaskQueueFlush, EXECUTION_TASK_QUEUE_CAPACITY}, FlashblockStateCache, }; use futures_util::{FutureExt, Stream, StreamExt}; @@ -169,7 +169,7 @@ pub fn handle_execution_tasks( }; // Extract buildable sequence for this height from raw cache - let Some(args) = raw_cache.take_buildable_for_height(execute_height) else { + let Some(args) = raw_cache.try_get_buildable_args(execute_height) else { trace!( target: "flashblocks", execute_height = execute_height, @@ -199,6 +199,7 @@ pub async fn handle_canonical_stream( mut canon_rx: CanonStateNotificationStream, flashblocks_state: FlashblockStateCache, raw_cache: Arc>, + task_queue: ExecutionTaskQueue, ) { info!(target: "flashblocks", "Canonical state handler started"); while let Some(notification) = canon_rx.next().await { @@ -207,8 +208,10 @@ pub async fn handle_canonical_stream( let block_number = tip.number(); let is_reorg = notification.reverted().is_some(); - flashblocks_state.handle_canonical_block((block_number, block_hash), is_reorg); raw_cache.handle_canonical_height(block_number); + if flashblocks_state.handle_canonical_block((block_number, block_hash), is_reorg) { + task_queue.flush(); + } debug!( target: "flashblocks", From a74a10c497955d0b39287d873d6defca4b39bd51 Mon Sep 17 00:00:00 2001 From: Niven Date: Wed, 25 Mar 2026 10:17:30 +0800 Subject: [PATCH 49/76] refactor(flashblocks): simplify flush return flow and fix minor issues MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Consolidate early-return flush logic into a single return path, prefer newest raw cache entry via reverse iteration, fix typo in log message, and inline format args for clippy compliance. 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude Opus 4.6 (1M context) --- crates/flashblocks/src/cache/mod.rs | 8 ++++---- crates/flashblocks/src/cache/raw.rs | 3 +++ crates/flashblocks/src/state.rs | 8 ++++---- 3 files changed, 11 insertions(+), 8 deletions(-) diff --git a/crates/flashblocks/src/cache/mod.rs b/crates/flashblocks/src/cache/mod.rs index 45d4cc97..1828914e 100644 --- a/crates/flashblocks/src/cache/mod.rs +++ b/crates/flashblocks/src/cache/mod.rs @@ -390,17 +390,17 @@ impl FlashblockStateCacheInner { fn handle_canonical_block(&mut self, canon_info: (u64, B256), reorg: bool) -> bool { let pending_stale = self.pending_cache.as_ref().is_some_and(|p| p.get_height() <= canon_info.0); - if pending_stale || reorg { + let flush = pending_stale || reorg; + if flush { warn!( target: "flashblocks", canonical_height = canon_info.0, cache_height = self.confirm_height, canonical_reorg = reorg, - pending_stale = pending_stale, + pending_stale, "Reorg or pending stale detected on handle canonical block", ); self.flush(); - return true; } else { debug!( target: "flashblocks", @@ -414,7 +414,7 @@ impl FlashblockStateCacheInner { // Update state heights self.canon_info = canon_info; self.confirm_height = self.confirm_height.max(canon_info.0); - false + flush } pub fn get_confirmed_block(&self) -> Option> { diff --git a/crates/flashblocks/src/cache/raw.rs b/crates/flashblocks/src/cache/raw.rs index dcae0138..4afb4047 100644 --- a/crates/flashblocks/src/cache/raw.rs +++ b/crates/flashblocks/src/cache/raw.rs @@ -101,8 +101,11 @@ impl RawFlashblocksCacheInner { &self, height: u64, ) -> Option>>>> { + // Iterate newest-first so that the most recent entry is always picked first + // (same height, different payload_id). self.cache .iter() + .rev() .find(|entry| entry.block_number() == Some(height)) .and_then(|entry| entry.try_to_buildable_args()) } diff --git a/crates/flashblocks/src/state.rs b/crates/flashblocks/src/state.rs index 9886e766..ecc457e7 100644 --- a/crates/flashblocks/src/state.rs +++ b/crates/flashblocks/src/state.rs @@ -172,14 +172,14 @@ pub fn handle_execution_tasks( let Some(args) = raw_cache.try_get_buildable_args(execute_height) else { trace!( target: "flashblocks", - execute_height = execute_height, - "No buildable args for excution task height, skipping" + execute_height, + "No buildable args for execution task height, skipping" ); continue; }; debug!( target: "flashblocks", - execute_height = execute_height, + execute_height, last_index = args.last_flashblock_index, "Executing flashblocks sequence" ); @@ -188,7 +188,7 @@ pub fn handle_execution_tasks( warn!( target: "flashblocks", %err, - execute_height = execute_height, + execute_height, "Validator failed to execute flashblocks sequence" ); } From b0a8862a6f54950e8461262f44b746f92eadeeab Mon Sep 17 00:00:00 2001 From: Niven Date: Wed, 25 Mar 2026 10:43:02 +0800 Subject: [PATCH 50/76] feat(flashblocks): track payload_id through execution for incremental build validation MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Thread payload_id from raw cache build args into PrefixExecutionMeta so the validator can verify that incremental builds belong to the same payload sequence. Also fix test assertions for rejected flashblocks and tighten prevalidate_incoming_sequence to detect payload_id mismatches. 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude Opus 4.6 (1M context) --- crates/flashblocks/src/cache/raw.rs | 7 +++-- crates/flashblocks/src/execution/mod.rs | 4 +++ crates/flashblocks/src/execution/validator.rs | 28 +++++++++++++------ 3 files changed, 27 insertions(+), 12 deletions(-) diff --git a/crates/flashblocks/src/cache/raw.rs b/crates/flashblocks/src/cache/raw.rs index 4afb4047..c03f1046 100644 --- a/crates/flashblocks/src/cache/raw.rs +++ b/crates/flashblocks/src/cache/raw.rs @@ -209,6 +209,7 @@ impl RawFlashblocksEntry { let best_revision = self.try_get_best_revision()?; Some(BuildArgs { base: self.base()?.clone(), + payload_id: self.payload_id()?, transactions: self.transactions_up_to(best_revision), withdrawals: self.withdrawals_at(best_revision), last_flashblock_index: best_revision, @@ -271,8 +272,8 @@ mod tests { .payload_id(payload_id) .build(); let result = cache.handle_flashblock(fb_wrong_block); - assert!(result.is_ok(), "mismatched block number creates a new entry"); - assert_eq!(cache.cache.len(), 2, "should have two distinct entries"); + assert!(result.is_err(), "mismatched block number with same payload_id should be rejected"); + assert_eq!(cache.cache.len(), 1, "rejected flashblock should not create a new entry"); } #[test] @@ -401,7 +402,7 @@ mod tests { cache.handle_canonical_height(100); let result = cache.handle_flashblock(fb100); - assert!(result.is_ok(), "flashblock at canonical height returns Ok"); + assert!(result.is_err(), "flashblock at canonical height should be rejected"); assert_eq!(cache.cache.len(), 0, "flashblock at canonical height should not be inserted"); } diff --git a/crates/flashblocks/src/execution/mod.rs b/crates/flashblocks/src/execution/mod.rs index 10a7352a..33290798 100644 --- a/crates/flashblocks/src/execution/mod.rs +++ b/crates/flashblocks/src/execution/mod.rs @@ -4,6 +4,7 @@ pub(crate) mod validator; pub(crate) use validator::FlashblockSequenceValidator; use alloy_eips::eip4895::Withdrawal; +use alloy_rpc_types_engine::PayloadId; use op_alloy_rpc_types_engine::OpFlashblockPayloadBase; use reth_optimism_primitives::OpReceipt; @@ -15,6 +16,7 @@ use reth_revm::cached::CachedReads; pub(crate) struct BuildArgs { pub(crate) base: OpFlashblockPayloadBase, + pub(crate) payload_id: PayloadId, pub(crate) transactions: I, pub(crate) withdrawals: Vec, pub(crate) last_flashblock_index: u64, @@ -23,6 +25,8 @@ pub(crate) struct BuildArgs { /// Cached prefix execution data used to resume canonical builds. #[derive(Debug, Clone, Default)] pub struct PrefixExecutionMeta { + /// The payload ID of the latest flashblocks sequence. + pub(crate) payload_id: PayloadId, /// Cached reads from execution for reuse. pub cached_reads: CachedReads, /// Number of leading transactions covered by cached execution. diff --git a/crates/flashblocks/src/execution/validator.rs b/crates/flashblocks/src/execution/validator.rs index dc28ea26..5f0ace55 100644 --- a/crates/flashblocks/src/execution/validator.rs +++ b/crates/flashblocks/src/execution/validator.rs @@ -145,8 +145,7 @@ where N::Block: From>, { // Pre-validate incoming flashblocks sequence - let pending_sequence = - self.prevalidate_incoming_sequence(args.base.block_number, args.last_flashblock_index)?; + let pending_sequence = self.prevalidate_incoming_sequence(&args)?; let parent_hash = args.base.parent_hash; let block_transactions: Vec<_> = args.transactions.into_iter().collect(); @@ -380,6 +379,7 @@ where args.base, executed_block, PrefixExecutionMeta { + payload_id: args.payload_id, cached_reads, cached_tx_count: block_transaction_count, gas_used: prefix_gas_used, @@ -431,11 +431,15 @@ where }) } - fn prevalidate_incoming_sequence( + fn prevalidate_incoming_sequence< + I: IntoIterator>>, + >( &self, - incoming_block_number: u64, - incoming_last_index: u64, + args: &BuildArgs, ) -> eyre::Result>> { + let incoming_payload_id = args.payload_id; + let incoming_block_number = args.base.block_number; + let incoming_last_index = args.last_flashblock_index; if let Some(pending) = self.flashblocks_state.get_pending_sequence() { // Validate incoming height continuity let pending_height = pending.get_height(); @@ -447,11 +451,17 @@ where )); } if pending_height == incoming_block_number { - // Validate states of last executed flashblock index - let last_index = pending.prefix_execution_meta.last_flashblock_index; - if last_index >= incoming_last_index { + // Validate for incremental builds + let pending_payload_id = pending.prefix_execution_meta.payload_id; + if pending_payload_id != incoming_payload_id { return Err(eyre::eyre!( - "flashblock index mismatch: incoming={incoming_last_index}, pending={incoming_last_index}" + "payload_id mismatch on incremental build: incoming={incoming_payload_id}, pending={pending_payload_id}" + )); + } + let pending_last_index = pending.prefix_execution_meta.last_flashblock_index; + if pending_last_index >= incoming_last_index { + return Err(eyre::eyre!( + "flashblock index mismatch: incoming={incoming_last_index}, pending={pending_last_index}" )); } return Ok(Some(pending)); From 89ee6220dcdfcfcfa787cdcb350ae41fdcfcb43c Mon Sep 17 00:00:00 2001 From: Niven Date: Wed, 25 Mar 2026 11:00:59 +0800 Subject: [PATCH 51/76] refactor(flashblocks): remove unused methods and tighten dead code attrs MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Remove unused ConfirmCache methods (hash_for_number, remove_block_by_*, len/is_empty in non-test), move block_from_bar into test-only scope, drop unused type param on handle_execution_tasks, add Default impl for FlashblockStateCache, and use idiomatic is_none_or / contains_key. 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude Opus 4.6 (1M context) --- bin/node/src/main.rs | 1 - crates/flashblocks/src/cache/confirm.rs | 95 +++---------------- crates/flashblocks/src/cache/mod.rs | 6 ++ crates/flashblocks/src/cache/raw.rs | 4 +- crates/flashblocks/src/cache/utils.rs | 17 ++-- crates/flashblocks/src/execution/validator.rs | 3 +- crates/flashblocks/src/service.rs | 2 +- crates/flashblocks/src/state.rs | 2 +- crates/flashblocks/src/test_utils.rs | 2 + 9 files changed, 33 insertions(+), 99 deletions(-) diff --git a/bin/node/src/main.rs b/bin/node/src/main.rs index d9296d1b..176c2949 100644 --- a/bin/node/src/main.rs +++ b/bin/node/src/main.rs @@ -22,7 +22,6 @@ use reth_optimism_evm::OpEvmConfig; use reth_optimism_node::{args::RollupArgs, OpNode}; use reth_provider::CanonStateSubscriptions; use reth_rpc_server_types::RethRpcModule; -use reth_tasks::Runtime; use xlayer_chainspec::XLayerChainSpecParser; use xlayer_flashblocks::{ diff --git a/crates/flashblocks/src/cache/confirm.rs b/crates/flashblocks/src/cache/confirm.rs index aed427eb..4dab5ee5 100644 --- a/crates/flashblocks/src/cache/confirm.rs +++ b/crates/flashblocks/src/cache/confirm.rs @@ -71,16 +71,6 @@ impl ConfirmCache { } } - /// Returns the number of cached entries. - pub(crate) fn len(&self) -> usize { - self.blocks.len() - } - - /// Returns `true` if the cache is empty. - pub(crate) fn is_empty(&self) -> bool { - self.blocks.is_empty() - } - /// Inserts a confirmed block into the cache, indexed by block number and block hash. pub(crate) fn insert( &mut self, @@ -129,11 +119,6 @@ impl ConfirmCache { self.hash_to_number.get(block_hash).copied() } - /// Returns the block hash for the given block number, if cached. - pub(crate) fn hash_for_number(&self, block_number: u64) -> Option { - self.blocks.get(&block_number).map(|(hash, _)| *hash) - } - /// Returns the confirmed block for the given block hash, if present. pub(crate) fn get_block_by_hash(&self, block_hash: &B256) -> Option> { self.get_block_by_number(self.number_for_hash(block_hash)?) @@ -192,25 +177,6 @@ impl ConfirmCache { .collect()) } - /// Removes and returns the confirmed block for the given block number. - pub(crate) fn remove_block_by_number( - &mut self, - block_number: u64, - ) -> Option> { - let (hash, block) = self.blocks.remove(&block_number)?; - self.hash_to_number.remove(&hash); - self.remove_tx_index_for_block(&block); - Some(block) - } - - /// Removes and returns the confirmed block for the given block hash. - pub(crate) fn remove_block_by_hash(&mut self, block_hash: &B256) -> Option> { - let number = self.hash_to_number.remove(block_hash)?; - let (_, block) = self.blocks.remove(&number)?; - self.remove_tx_index_for_block(&block); - Some(block) - } - /// Removes all tx index entries for the transactions in the given block. fn remove_tx_index_for_block(&mut self, block: &ConfirmedBlock) { for tx in block.executed_block.recovered_block.body().transactions() { @@ -232,6 +198,18 @@ impl ConfirmCache { } count } + + /// Returns the number of cached entries. + #[cfg(test)] + pub(crate) fn len(&self) -> usize { + self.blocks.len() + } + + /// Returns `true` if the cache is empty. + #[cfg(test)] + pub(crate) fn is_empty(&self) -> bool { + self.blocks.is_empty() + } } #[cfg(test)] @@ -320,15 +298,6 @@ mod tests { assert_eq!(cache.number_for_hash(&hash), Some(10)); } - #[test] - fn test_confirm_cache_hash_for_number_returns_correct_mapping() { - let mut cache = ConfirmCache::::new(); - let block = make_executed_block(10, B256::ZERO); - let expected_hash = block.recovered_block.hash(); - cache.insert(10, block, empty_receipts()).expect("insert"); - assert_eq!(cache.hash_for_number(10), Some(expected_hash)); - } - #[test] fn test_confirm_cache_clear_removes_all_entries() { let mut cache = ConfirmCache::::new(); @@ -398,32 +367,6 @@ mod tests { assert!(cache.number_for_hash(&hashes[2]).is_some()); } - #[test] - fn test_confirm_cache_remove_block_by_number_returns_block_and_cleans_indices() { - let mut cache = ConfirmCache::::new(); - let block = make_executed_block(5, B256::ZERO); - let block_hash = block.recovered_block.hash(); - cache.insert(5, block, empty_receipts()).expect("insert"); - let removed = cache.remove_block_by_number(5); - assert!(removed.is_some()); - assert_eq!(cache.len(), 0); - assert!(cache.get_block_by_number(5).is_none()); - assert!(cache.number_for_hash(&block_hash).is_none()); - } - - #[test] - fn test_confirm_cache_remove_block_by_hash_returns_block_and_cleans_indices() { - let mut cache = ConfirmCache::::new(); - let block = make_executed_block(7, B256::ZERO); - let block_hash = block.recovered_block.hash(); - cache.insert(7, block, empty_receipts()).expect("insert"); - let removed = cache.remove_block_by_hash(&block_hash); - assert!(removed.is_some()); - assert_eq!(cache.len(), 0); - assert!(cache.get_block_by_hash(&block_hash).is_none()); - assert!(cache.get_block_by_number(7).is_none()); - } - #[test] fn test_confirm_cache_get_executed_blocks_up_to_height_returns_contiguous_blocks_newest_first() { @@ -552,20 +495,6 @@ mod tests { } } - #[test] - fn test_confirm_cache_remove_block_by_number_cleans_tx_index() { - let mut cache = ConfirmCache::::new(); - let (block, receipts) = make_executed_block_with_txs(5, B256::ZERO, 0, 2); - let tx_hashes: Vec<_> = - block.recovered_block.body().transactions().map(|tx| tx.tx_hash()).collect(); - cache.insert(5, block, receipts).expect("insert"); - - cache.remove_block_by_number(5); - for tx_hash in tx_hashes.iter() { - assert!(cache.get_tx_info(tx_hash).is_none()); - } - } - #[test] fn test_confirm_cache_insert_duplicate_height_leaks_stale_hash_index() { let mut cache = ConfirmCache::::new(); diff --git a/crates/flashblocks/src/cache/mod.rs b/crates/flashblocks/src/cache/mod.rs index 1828914e..ce73b09f 100644 --- a/crates/flashblocks/src/cache/mod.rs +++ b/crates/flashblocks/src/cache/mod.rs @@ -66,6 +66,12 @@ pub struct FlashblockStateCache { changeset_cache: ChangesetCache, } +impl Default for FlashblockStateCache { + fn default() -> Self { + Self::new() + } +} + // FlashblockStateCache read interfaces impl FlashblockStateCache { /// Creates a new [`FlashblockStateCache`]. diff --git a/crates/flashblocks/src/cache/raw.rs b/crates/flashblocks/src/cache/raw.rs index c03f1046..377e8fda 100644 --- a/crates/flashblocks/src/cache/raw.rs +++ b/crates/flashblocks/src/cache/raw.rs @@ -64,7 +64,7 @@ impl RawFlashblocksCacheInner { let retained: Vec<_> = self .cache .drain() - .filter(|entry| entry.block_number().map_or(true, |n| n > height)) + .filter(|entry| entry.block_number().is_none_or(|n| n > height)) .collect(); for entry in retained { self.cache.enqueue(entry); @@ -161,7 +161,7 @@ impl RawFlashblocksEntry { } self.block_number() == Some(flashblock.block_number()) && self.payload_id() == Some(flashblock.payload_id) - && self.payloads.get(&flashblock.index).is_none() + && !self.payloads.contains_key(&flashblock.index) } fn try_get_best_revision(&self) -> Option { diff --git a/crates/flashblocks/src/cache/utils.rs b/crates/flashblocks/src/cache/utils.rs index 32936a90..d4652a37 100644 --- a/crates/flashblocks/src/cache/utils.rs +++ b/crates/flashblocks/src/cache/utils.rs @@ -1,21 +1,18 @@ -use reth_primitives_traits::{Block, BlockTy, NodePrimitives}; -use reth_rpc_eth_types::block::BlockAndReceipts; - -pub(crate) fn block_from_bar(bar: &BlockAndReceipts) -> BlockTy { - BlockTy::::new(bar.block.header().clone(), bar.block.body().clone()) -} - #[cfg(test)] mod tests { - use super::*; + use std::sync::Arc; + use alloy_consensus::{BlockHeader, Header}; use alloy_primitives::B256; use reth_optimism_primitives::{OpBlock, OpPrimitives}; + use reth_primitives_traits::{Block, BlockTy, NodePrimitives}; use reth_primitives_traits::{RecoveredBlock, SealedBlock, SealedHeader}; use reth_rpc_eth_types::block::BlockAndReceipts; - use std::sync::Arc; - /// Builds a minimal `BlockAndReceipts` for testing. + pub(crate) fn block_from_bar(bar: &BlockAndReceipts) -> BlockTy { + BlockTy::::new(bar.block.header().clone(), bar.block.body().clone()) + } + fn make_block_and_receipts( block_number: u64, parent_hash: B256, diff --git a/crates/flashblocks/src/execution/validator.rs b/crates/flashblocks/src/execution/validator.rs index 5f0ace55..b1cc747f 100644 --- a/crates/flashblocks/src/execution/validator.rs +++ b/crates/flashblocks/src/execution/validator.rs @@ -486,7 +486,7 @@ where /// 2. Spawns a background task for incremental receipt root computation /// 3. Executes transactions with metrics collection via state hooks /// 4. Merges state transitions and records execution metrics - #[expect(clippy::type_complexity)] + #[expect(clippy::type_complexity, clippy::too_many_arguments)] fn execute_block( &mut self, state_provider: &dyn StateProvider, @@ -864,6 +864,7 @@ where } } + #[expect(clippy::type_complexity)] fn state_provider_builder( &self, hash: B256, diff --git a/crates/flashblocks/src/service.rs b/crates/flashblocks/src/service.rs index 6d4aece3..61363e54 100644 --- a/crates/flashblocks/src/service.rs +++ b/crates/flashblocks/src/service.rs @@ -233,7 +233,7 @@ where self.task_executor.spawn_critical_blocking_task( "xlayer-flashblocks-execution", async move { - handle_execution_tasks::( + handle_execution_tasks::( validator, cache, queue, ); }, diff --git a/crates/flashblocks/src/state.rs b/crates/flashblocks/src/state.rs index ecc457e7..ecba9811 100644 --- a/crates/flashblocks/src/state.rs +++ b/crates/flashblocks/src/state.rs @@ -123,7 +123,7 @@ fn process_flashblock_payload( Ok(()) } -pub fn handle_execution_tasks( +pub fn handle_execution_tasks( mut validator: FlashblockSequenceValidator, raw_cache: Arc>, task_queue: ExecutionTaskQueue, diff --git a/crates/flashblocks/src/test_utils.rs b/crates/flashblocks/src/test_utils.rs index 372c2bf0..777fb6c5 100644 --- a/crates/flashblocks/src/test_utils.rs +++ b/crates/flashblocks/src/test_utils.rs @@ -116,6 +116,7 @@ impl TestFlashBlockFactory { Self { block_time: 2, base_timestamp: 1_000_000, current_block_number: 100 } } + #[allow(dead_code)] pub(crate) fn with_block_time(mut self, block_time: u64) -> Self { self.block_time = block_time; self @@ -242,6 +243,7 @@ impl TestFlashBlockBuilder { self } + #[allow(dead_code)] pub(crate) fn transactions(mut self, transactions: Vec) -> Self { self.transactions = transactions; self From 7c0a69127653f6f7a2c31537d85bbe5cbb90c4ad Mon Sep 17 00:00:00 2001 From: Niven Date: Wed, 25 Mar 2026 11:23:30 +0800 Subject: [PATCH 52/76] fix: resolve claude issues MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude Opus 4.6 (1M context) --- crates/flashblocks/src/cache/confirm.rs | 17 +++++++++++------ crates/flashblocks/src/cache/mod.rs | 3 +++ crates/flashblocks/src/execution/validator.rs | 8 +++++++- crates/rpc/src/flashblocks.rs | 8 +++++++- 4 files changed, 28 insertions(+), 8 deletions(-) diff --git a/crates/flashblocks/src/cache/confirm.rs b/crates/flashblocks/src/cache/confirm.rs index 4dab5ee5..931301a0 100644 --- a/crates/flashblocks/src/cache/confirm.rs +++ b/crates/flashblocks/src/cache/confirm.rs @@ -12,7 +12,7 @@ use reth_chain_state::ExecutedBlock; use reth_primitives_traits::{BlockBody, NodePrimitives, ReceiptTy}; use reth_rpc_eth_types::block::BlockAndReceipts; -const DEFAULT_CONFIRM_BLOCK_CACHE_SIZE: usize = 1_000; +const DEFAULT_CONFIRM_BLOCK_CACHE_SIZE: usize = 50; const DEFAULT_TX_CACHE_SIZE: usize = DEFAULT_CONFIRM_BLOCK_CACHE_SIZE * 10_000; #[derive(Debug)] @@ -101,6 +101,12 @@ impl ConfirmCache { ); } + if let Some((old_hash, old_block)) = self.blocks.remove(&height) { + // Clean up old height entries if exist + self.hash_to_number.remove(&old_hash); + self.remove_tx_index_for_block(&old_block); + } + // Build block index entries for block data self.hash_to_number.insert(hash, height); self.blocks.insert(height, (hash, ConfirmedBlock { executed_block, receipts })); @@ -496,7 +502,7 @@ mod tests { } #[test] - fn test_confirm_cache_insert_duplicate_height_leaks_stale_hash_index() { + fn test_confirm_cache_insert_duplicate_height_cleans_stale_indexes() { let mut cache = ConfirmCache::::new(); let block_a = make_executed_block(10, B256::ZERO); let hash_a = block_a.recovered_block.hash(); @@ -507,12 +513,11 @@ mod tests { cache.insert(10, block_b, empty_receipts()).expect("second insert"); assert_eq!(cache.number_for_hash(&hash_b), Some(10)); - // Documents known limitation: BTreeMap::insert overwrites the value - // but doesn't clean the old hash_to_number entry. + // Stale hash_to_number entry is cleaned up on overwrite. assert_eq!( cache.number_for_hash(&hash_a), - Some(10), - "stale hash_to_number entry remains (known limitation)" + None, + "stale hash_to_number entry should be removed on duplicate height insert" ); } diff --git a/crates/flashblocks/src/cache/mod.rs b/crates/flashblocks/src/cache/mod.rs index ce73b09f..78fbcead 100644 --- a/crates/flashblocks/src/cache/mod.rs +++ b/crates/flashblocks/src/cache/mod.rs @@ -176,6 +176,7 @@ impl FlashblockStateCache { let in_memory = match in_memory { Ok(blocks) => blocks, Err(e) => { + // Flush as the overlay is non-contiguous, indicating potential poluuted state. warn!(target: "flashblocks", "Failed to get flashblocks state provider: {e}. Flushing cache"); self.inner.write().flush(); None @@ -203,6 +204,7 @@ impl FlashblockStateCache { let in_memory = match in_memory { Ok(blocks) => blocks, Err(e) => { + // Flush as the overlay is non-contiguous, indicating potential poluuted state. warn!(target: "flashblocks", "Failed to get flashblocks state provider: {e}. Flushing cache"); self.inner.write().flush(); None @@ -233,6 +235,7 @@ impl FlashblockStateCache { let in_memory = match in_memory { Ok(blocks) => blocks, Err(e) => { + // Flush as the overlay is non-contiguous, indicating potential poluuted state. warn!(target: "flashblocks", "Failed to get flashblocks state provider: {e}. Flushing cache"); self.inner.write().flush(); None diff --git a/crates/flashblocks/src/execution/validator.rs b/crates/flashblocks/src/execution/validator.rs index b1cc747f..bb6a2b68 100644 --- a/crates/flashblocks/src/execution/validator.rs +++ b/crates/flashblocks/src/execution/validator.rs @@ -420,6 +420,7 @@ where ); } self.flashblocks_state.handle_pending_sequence(PendingSequence { + // Set pending block deadline to 1 second matching default blocktime. pending: PendingBlock::with_executed_block( Instant::now() + Duration::from_secs(1), executed_block, @@ -473,7 +474,12 @@ where let canon_height = self.flashblocks_state.get_canon_height(); if incoming_block_number > canon_height + 1 { return Err(eyre::eyre!( - "height mismatch: incoming={incoming_block_number}, canonical={canon_height}" + "flashblock height too far ahead: incoming={incoming_block_number}, canonical={canon_height}" + )); + } + if incoming_block_number <= canon_height { + return Err(eyre::eyre!( + "stale height: incoming={incoming_block_number}, canonical={canon_height}" )); } Ok(None) diff --git a/crates/rpc/src/flashblocks.rs b/crates/rpc/src/flashblocks.rs index 587cf97f..20a2b25c 100644 --- a/crates/rpc/src/flashblocks.rs +++ b/crates/rpc/src/flashblocks.rs @@ -41,7 +41,13 @@ use xlayer_flashblocks::FlashblockStateCache; #[cfg_attr(test, rpc(server, client, namespace = "eth"))] pub trait FlashblocksEthApiOverride { // ----------------- Block apis ----------------- - /// Returns the current block number, with the flashblocks state cache overlay. + /// Returns the current block number as the maximum of the flashblocks confirm + /// height and the canonical chain height. + /// + /// Note: This may return a height ahead of the canonical chain when flashblocks + /// are actively being processed. Block data at this height is available through + /// the overridden `eth_getBlockByNumber` and `eth_getTransactionByHash` methods, + /// but non-overridden methods (e.g., `eth_getLogs`) only see canonical state. #[method(name = "blockNumber")] async fn block_number(&self) -> RpcResult; From 455fcaaad0b77997ef93b254fdc383bf1f341c6c Mon Sep 17 00:00:00 2001 From: Niven Date: Wed, 25 Mar 2026 12:19:34 +0800 Subject: [PATCH 53/76] fix(flashblocks): fix confirm cache insert ordering and capacity check MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - Reorder insert() to remove old block entries before inserting new tx index entries, preventing silent tx_index corruption when old and new blocks share transaction hashes - Allow height replacement at full capacity by checking contains_key before rejecting inserts - Add test for overlapping tx hash scenario on duplicate height insert - Document PayloadId::default() usage in spawn_prewarm 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude Opus 4.6 (1M context) --- crates/flashblocks/src/cache/confirm.rs | 47 ++++++++++++++++++++----- crates/flashblocks/src/service.rs | 1 + 2 files changed, 40 insertions(+), 8 deletions(-) diff --git a/crates/flashblocks/src/cache/confirm.rs b/crates/flashblocks/src/cache/confirm.rs index 931301a0..ee1ca102 100644 --- a/crates/flashblocks/src/cache/confirm.rs +++ b/crates/flashblocks/src/cache/confirm.rs @@ -78,11 +78,18 @@ impl ConfirmCache { executed_block: ExecutedBlock, receipts: Arc>>, ) -> eyre::Result<()> { - if self.blocks.len() >= DEFAULT_CONFIRM_BLOCK_CACHE_SIZE { + if self.blocks.len() >= DEFAULT_CONFIRM_BLOCK_CACHE_SIZE + && !self.blocks.contains_key(&height) + { return Err(eyre!( "confirm cache at max capacity ({DEFAULT_CONFIRM_BLOCK_CACHE_SIZE}), cannot insert block: {height}" )); } + if let Some((old_hash, old_block)) = self.blocks.remove(&height) { + // Clean up old entries at this height if exist + self.hash_to_number.remove(&old_hash); + self.remove_tx_index_for_block(&old_block); + } // Build tx index entries for all transactions in this block let hash = executed_block.recovered_block.hash(); @@ -100,13 +107,6 @@ impl ConfirmCache { }, ); } - - if let Some((old_hash, old_block)) = self.blocks.remove(&height) { - // Clean up old height entries if exist - self.hash_to_number.remove(&old_hash); - self.remove_tx_index_for_block(&old_block); - } - // Build block index entries for block data self.hash_to_number.insert(hash, height); self.blocks.insert(height, (hash, ConfirmedBlock { executed_block, receipts })); @@ -521,6 +521,37 @@ mod tests { ); } + #[test] + fn test_confirm_cache_insert_duplicate_height_retains_shared_tx_entries() { + // Two blocks at the same height share a transaction (same nonce → same hash). + // After replacing, the shared tx must still be present in the index. + let mut cache = ConfirmCache::::new(); + // block_a has txs with nonces [0, 1] + let (block_a, receipts_a) = make_executed_block_with_txs(10, B256::ZERO, 0, 2); + let shared_tx_hash: TxHash = + (*block_a.recovered_block.body().transactions().next().unwrap().tx_hash()).into(); + // block_b has txs with nonces [0, 2] — nonce 0 is shared with block_a + let (block_b, receipts_b) = make_executed_block_with_txs(10, B256::repeat_byte(0xFF), 0, 2); + let block_b_tx_hashes: Vec = block_b + .recovered_block + .body() + .transactions() + .map(|tx| (*tx.tx_hash()).into()) + .collect(); + + cache.insert(10, block_a, receipts_a).expect("first insert"); + assert!(cache.get_tx_info(&shared_tx_hash).is_some()); + + cache.insert(10, block_b, receipts_b).expect("second insert"); + // The shared tx (nonce 0) must still be in the index, pointing to block_b + let info = cache.get_tx_info(&shared_tx_hash); + assert!(info.is_some(), "shared tx should be retained after replacement"); + // All block_b txs should be present + for tx_hash in &block_b_tx_hashes { + assert!(cache.get_tx_info(tx_hash).is_some(), "block_b tx should be in index"); + } + } + #[test] fn test_confirm_cache_flush_cleans_tx_index_for_partial_flush() { let mut cache = ConfirmCache::::new(); diff --git a/crates/flashblocks/src/service.rs b/crates/flashblocks/src/service.rs index 61363e54..0e80c009 100644 --- a/crates/flashblocks/src/service.rs +++ b/crates/flashblocks/src/service.rs @@ -282,6 +282,7 @@ where hashed_state: Either::Right(trie_data.hashed_state), trie_updates: Either::Right(trie_data.trie_updates), }; + // Use default zero id — to avoid accumulating stale entries in the engine state tree. let payload = OpBuiltPayload::::new( reth_payload_builder::PayloadId::default(), Arc::new(block), From 50e4841ce99356ae3720b4ee55022ffaf8a76b93 Mon Sep 17 00:00:00 2001 From: Niven Date: Wed, 25 Mar 2026 13:16:57 +0800 Subject: [PATCH 54/76] chore: add comments MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude Opus 4.6 (1M context) --- crates/flashblocks/src/cache/raw.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/crates/flashblocks/src/cache/raw.rs b/crates/flashblocks/src/cache/raw.rs index 377e8fda..6b050004 100644 --- a/crates/flashblocks/src/cache/raw.rs +++ b/crates/flashblocks/src/cache/raw.rs @@ -195,6 +195,8 @@ impl RawFlashblocksEntry { } fn withdrawals_at(&self, index: u64) -> Vec { + // Per the OP Stack flashblocks spec, each diff's `withdrawals` field is cumulative + // (the complete list for the entire block), not incremental self.payloads.get(&index).map(|p| p.diff.withdrawals.clone()).unwrap_or_default() } From 40d60cb922b38e215c573846dab51e6c3448deee Mon Sep 17 00:00:00 2001 From: Niven Date: Wed, 25 Mar 2026 16:56:54 +0800 Subject: [PATCH 55/76] fix(flashblocks): guard uninitialized canon height and use OS thread for execution MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Reject incoming flashblocks when canonical height is still zero (not yet initialized) to prevent premature validation. Switch the execution task from spawn_critical_blocking_task to a dedicated OS thread via reth_tasks::spawn_os_thread since it runs a blocking loop. 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude Opus 4.6 (1M context) --- crates/flashblocks/src/execution/validator.rs | 7 ++++++- crates/flashblocks/src/service.rs | 13 ++++--------- 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/crates/flashblocks/src/execution/validator.rs b/crates/flashblocks/src/execution/validator.rs index bb6a2b68..0fd52833 100644 --- a/crates/flashblocks/src/execution/validator.rs +++ b/crates/flashblocks/src/execution/validator.rs @@ -470,8 +470,13 @@ where // Optimistic fresh build return Ok(None); } - // No pending sequence initialized yet. Validate with canonical chainstate height + // No pending sequence initialized yet. Validate with canonical chainstate height. let canon_height = self.flashblocks_state.get_canon_height(); + if canon_height == 0 { + return Err(eyre::eyre!( + "canonical height not yet initialized, skipping: incoming={incoming_block_number}" + )); + } if incoming_block_number > canon_height + 1 { return Err(eyre::eyre!( "flashblock height too far ahead: incoming={incoming_block_number}, canonical={canon_height}" diff --git a/crates/flashblocks/src/service.rs b/crates/flashblocks/src/service.rs index 0e80c009..ca3009d1 100644 --- a/crates/flashblocks/src/service.rs +++ b/crates/flashblocks/src/service.rs @@ -227,17 +227,12 @@ where )), ); - // Spawn the flashblocks sequence execution task handle on a dedicated blocking thread. + // Spawn the flashblocks sequence execution task on a dedicated OS thread. let cache = raw_cache.clone(); let queue = task_queue.clone(); - self.task_executor.spawn_critical_blocking_task( - "xlayer-flashblocks-execution", - async move { - handle_execution_tasks::( - validator, cache, queue, - ); - }, - ); + reth_tasks::spawn_os_thread("xlayer-flashblocks-execution", move || { + handle_execution_tasks::(validator, cache, queue); + }); // Spawn the canonical stream handle. self.task_executor.spawn_critical_task( From 878d3f5ba2edaf2e128d6848f4d38855c8f8f4b6 Mon Sep 17 00:00:00 2001 From: Niven Date: Wed, 25 Mar 2026 17:04:56 +0800 Subject: [PATCH 56/76] fix(flashblocks): reset confirm_height to canon height on flush MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Preserve canonical height continuity after a state cache flush instead of resetting confirm_height to zero, which could cause stale validation. 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude Opus 4.6 (1M context) --- crates/flashblocks/src/cache/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/flashblocks/src/cache/mod.rs b/crates/flashblocks/src/cache/mod.rs index 78fbcead..32debe46 100644 --- a/crates/flashblocks/src/cache/mod.rs +++ b/crates/flashblocks/src/cache/mod.rs @@ -335,8 +335,8 @@ impl FlashblockStateCacheInner { fn flush(&mut self) { warn!(target: "flashblocks", "Flushing flashblocks state cache"); self.pending_cache = None; - self.confirm_height = 0; self.confirm_cache.clear(); + self.confirm_height = self.canon_info.0; } /// Handles flushing a newly confirmed block to the confirm cache. Note that From 715fcd220339cd2471977a7daefa65cc75e69241 Mon Sep 17 00:00:00 2001 From: Niven Date: Wed, 25 Mar 2026 20:17:36 +0800 Subject: [PATCH 57/76] feat(flashblocks): extend overlay provider with canonical in-memory state MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Thread CanonicalInMemoryState into FlashblockStateCache so that get_overlay_data collects executed blocks from both the flashblocks cache and the engine's in-memory canonical chain, anchoring the state provider at the on-disk persistence boundary instead of the canon hash. 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude Opus 4.6 (1M context) --- bin/node/src/main.rs | 2 +- crates/flashblocks/src/cache/mod.rs | 73 ++++++++++++------- crates/flashblocks/src/execution/validator.rs | 11 +-- crates/rpc/src/default.rs | 10 ++- 4 files changed, 63 insertions(+), 33 deletions(-) diff --git a/bin/node/src/main.rs b/bin/node/src/main.rs index 176c2949..d0af1113 100644 --- a/bin/node/src/main.rs +++ b/bin/node/src/main.rs @@ -152,7 +152,7 @@ fn main() { args.xlayer_args.flashblocks_rpc.flashblock_url { // Initialize flashblocks RPC - let flashblocks_state = FlashblockStateCache::new(); + let flashblocks_state = FlashblockStateCache::new(ctx.provider().canonical_in_memory_state()); let canon_state_rx = ctx.provider().canonical_state_stream(); let service = FlashblocksRpcService::new( args.xlayer_args.builder.flashblocks, diff --git a/crates/flashblocks/src/cache/mod.rs b/crates/flashblocks/src/cache/mod.rs index 32debe46..495c147b 100644 --- a/crates/flashblocks/src/cache/mod.rs +++ b/crates/flashblocks/src/cache/mod.rs @@ -17,7 +17,8 @@ use tracing::*; use alloy_consensus::BlockHeader; use alloy_primitives::{TxHash, B256}; use alloy_rpc_types_eth::{BlockId, BlockNumberOrTag}; -use reth_chain_state::{ExecutedBlock, MemoryOverlayStateProvider}; + +use reth_chain_state::{CanonicalInMemoryState, ExecutedBlock, MemoryOverlayStateProvider}; use reth_primitives_traits::{NodePrimitives, ReceiptTy, SealedHeaderFor}; use reth_rpc_eth_types::block::BlockAndReceipts; use reth_storage_api::StateProviderBox; @@ -64,21 +65,17 @@ pub struct CachedTxInfo { pub struct FlashblockStateCache { inner: Arc>>, changeset_cache: ChangesetCache, -} - -impl Default for FlashblockStateCache { - fn default() -> Self { - Self::new() - } + canon_in_memory_state: CanonicalInMemoryState, } // FlashblockStateCache read interfaces impl FlashblockStateCache { /// Creates a new [`FlashblockStateCache`]. - pub fn new() -> Self { + pub fn new(canon_in_memory_state: CanonicalInMemoryState) -> Self { Self { inner: Arc::new(RwLock::new(FlashblockStateCacheInner::new())), changeset_cache: ChangesetCache::new(), + canon_in_memory_state, } } } @@ -216,32 +213,56 @@ impl FlashblockStateCache { )) } - /// Returns all available blocks for the given hash that lead back to the - /// canonical chain (from newest to oldest), the parent hash of the oldest - /// returned block, and the sealed header of the specified block hash. + /// Returns all overlay blocks for the given hash, spanning from the + /// persisted on-disk anchor up through the flashblocks state cache. + /// + /// Overlay blocks are collected newest-to-oldest from two layers: + /// 1. **Flashblocks state cache** — pending + confirmed blocks + /// 2. **Engine canonical in-memory state** — blocks committed to the + /// canonical chain but not yet persisted to disk /// - /// Returns `None` if the block for the given hash is not found. + /// Returns the overlay blocks, sealed header of the requested block, + /// and the on-disk anchor hash. Returns `Ok(None)` if the block is + /// not found in either layer (i.e. it is fully persisted on disk). + #[expect(clippy::type_complexity)] pub fn get_overlay_data( &self, block_hash: &B256, - ) -> Option<(Vec>, SealedHeaderFor, B256)> { + ) -> eyre::Result>, SealedHeaderFor, B256)>> { + // 1. Retrieve flashblocks state cache overlay let guard = self.inner.read(); - let block = guard.get_block_by_hash(block_hash)?.block; - let block_num = block.number(); + let mut header = + guard.get_block_by_hash(block_hash).map(|bar| bar.block.clone_sealed_header()); + let mut overlay = if let Some(ref h) = header { + let block_num = h.number(); + guard.get_executed_blocks_up_to_height(block_num)?.unwrap_or_default() + } else { + Vec::new() + }; let canon_hash = guard.get_canon_info().1; - let in_memory = guard.get_executed_blocks_up_to_height(block_num); drop(guard); - let in_memory = match in_memory { - Ok(blocks) => blocks, - Err(e) => { - // Flush as the overlay is non-contiguous, indicating potential poluuted state. - warn!(target: "flashblocks", "Failed to get flashblocks state provider: {e}. Flushing cache"); - self.inner.write().flush(); - None - } - }?; - Some((in_memory, block.clone_sealed_header(), canon_hash)) + // 2. Retrieve engine canonical in-memory blocks + let anchor_hash = + if let Some(block_state) = self.canon_in_memory_state.state_by_hash(canon_hash) { + let anchor = block_state.anchor(); + if header.is_none() { + header = block_state + .chain() + .find(|s| s.hash() == *block_hash) + .map(|s| s.block_ref().recovered_block().sealed_header().clone()) + } + overlay.extend(block_state.chain().map(|s| s.block())); + anchor.hash + } else { + canon_hash + }; + + if overlay.is_empty() || header.is_none() { + // Block hash not found, already persisted to disk + return Ok(None); + } + Ok(Some((overlay, header.expect("valid cached header"), anchor_hash))) } } diff --git a/crates/flashblocks/src/execution/validator.rs b/crates/flashblocks/src/execution/validator.rs index 0fd52833..85a97da7 100644 --- a/crates/flashblocks/src/execution/validator.rs +++ b/crates/flashblocks/src/execution/validator.rs @@ -884,9 +884,10 @@ where SealedHeaderFor, Option<(Vec>, B256)>, )> { - // Get overlay data (executed blocks + parent header) from flashblocks cache - if let Some((overlay_blocks, header, canon_hash)) = - self.flashblocks_state.get_overlay_data(&hash) + // Get overlay data (executed blocks + parent header) from flashblocks + // state cache and the canonical in-memory cache. + if let Some((overlay_blocks, header, anchor_hash)) = + self.flashblocks_state.get_overlay_data(&hash)? { debug!( target: "flashblocks::validator", @@ -895,11 +896,11 @@ where return Ok(( StateProviderBuilder::new( self.provider.clone(), - canon_hash, + anchor_hash, Some(overlay_blocks.clone()), ), header, - Some((overlay_blocks, canon_hash)), + Some((overlay_blocks, anchor_hash)), )); } // Check if block is persisted diff --git a/crates/rpc/src/default.rs b/crates/rpc/src/default.rs index dfb755b2..544d7ef3 100644 --- a/crates/rpc/src/default.rs +++ b/crates/rpc/src/default.rs @@ -60,7 +60,15 @@ mod tests { #[test] fn test_flashblocks_disabled_at_zero_height() { - let cache = FlashblockStateCache::::new(); + let cache = FlashblockStateCache::::new( + reth_chain_state::CanonicalInMemoryState::new( + Default::default(), + Default::default(), + None, + None, + None, + ), + ); let ext = DefaultRpcExt::new(Some(cache)); assert!(ext.flashblocks_state.as_ref().unwrap().get_confirm_height() == 0); } From 862793ae598f87b35f4959b0b9758056c599a389 Mon Sep 17 00:00:00 2001 From: Niven Date: Thu, 26 Mar 2026 17:49:18 +0800 Subject: [PATCH 58/76] feat: Add sending flashblocks target index MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude Opus 4.6 (1M context) --- crates/builder/src/flashblocks/builder.rs | 29 +++-- crates/builder/src/flashblocks/handler.rs | 7 +- crates/builder/src/flashblocks/mod.rs | 2 + crates/builder/src/flashblocks/payload.rs | 52 ++++++++ crates/builder/src/flashblocks/utils/p2p.rs | 6 +- crates/builder/src/flashblocks/utils/wspub.rs | 15 ++- crates/flashblocks/src/cache/mod.rs | 8 +- crates/flashblocks/src/cache/pending.rs | 6 +- crates/flashblocks/src/cache/raw.rs | 120 ++++++++++-------- crates/flashblocks/src/execution/mod.rs | 1 + crates/flashblocks/src/execution/validator.rs | 27 ++-- crates/flashblocks/src/lib.rs | 4 +- crates/flashblocks/src/persist.rs | 10 +- crates/flashblocks/src/service.rs | 8 +- crates/flashblocks/src/state.rs | 18 +-- crates/flashblocks/src/ws/decoding.rs | 16 +-- crates/flashblocks/src/ws/stream.rs | 14 +- 17 files changed, 219 insertions(+), 124 deletions(-) create mode 100644 crates/builder/src/flashblocks/payload.rs diff --git a/crates/builder/src/flashblocks/builder.rs b/crates/builder/src/flashblocks/builder.rs index 8afb33bd..fc898457 100644 --- a/crates/builder/src/flashblocks/builder.rs +++ b/crates/builder/src/flashblocks/builder.rs @@ -8,7 +8,7 @@ use crate::{ utils::{ cache::FlashblockPayloadsCache, execution::ExecutionInfo, wspub::WebSocketPublisher, }, - BuilderConfig, + BuilderConfig, XLayerFlashblockPayload, }, metrics::tokio::FlashblocksTaskMetrics, metrics::BuilderMetrics, @@ -195,7 +195,7 @@ pub(super) struct FlashblocksBuilder { pub task_executor: Tasks, /// Sender for sending built flashblock payloads to [`PayloadHandler`], /// which broadcasts outgoing flashblock payloads via p2p. - pub built_fb_payload_tx: mpsc::Sender, + pub built_fb_payload_tx: mpsc::Sender, /// Sender for sending built full block payloads to [`PayloadHandler`], /// which updates the engine tree state. pub built_payload_tx: mpsc::Sender, @@ -226,7 +226,7 @@ impl FlashblocksBuilder { task_executor: Tasks, config: BuilderConfig, builder_tx: FlashblocksBuilderTx, - built_fb_payload_tx: mpsc::Sender, + built_fb_payload_tx: mpsc::Sender, built_payload_tx: mpsc::Sender, p2p_cache: FlashblockPayloadsCache, ws_pub: Arc, @@ -421,12 +421,6 @@ where // We should always calculate state root for fallback payload let (fallback_payload, fb_payload, bundle_state, new_tx_hashes) = build_block(&mut state, &ctx, &mut info, Some(&mut fb_state), true)?; - // For X Layer - skip if replaying - if !rebuild_external_payload { - self.built_fb_payload_tx - .try_send(fb_payload.clone()) - .map_err(PayloadBuilderError::other)?; - } let mut best_payload = (fallback_payload.clone(), bundle_state); info!( @@ -438,8 +432,15 @@ where // not emitting flashblock if no_tx_pool in FCU, it's just syncing // For X Layer - skip if replaying if !ctx.attributes().no_tx_pool && !rebuild_external_payload { + // For X Layer - skip if replaying + let fb_payload_with_count = XLayerFlashblockPayload::new(fb_payload.clone(), 0); let flashblock_byte_size = - self.ws_pub.publish(&fb_payload).map_err(PayloadBuilderError::other)?; + self.ws_pub.publish(&fb_payload_with_count).map_err(PayloadBuilderError::other)?; + if !rebuild_external_payload { + self.built_fb_payload_tx + .try_send(fb_payload_with_count) + .map_err(PayloadBuilderError::other)?; + } ctx.metrics.flashblock_byte_size_histogram.record(flashblock_byte_size as f64); // For X Layer, full link monitoring support @@ -757,12 +758,16 @@ where fb_payload.index = flashblock_index; fb_payload.base = None; + let fb_payload_with_count = XLayerFlashblockPayload::new( + fb_payload.clone(), + fb_state.target_flashblock_count(), + ); let flashblock_byte_size = self .ws_pub - .publish(&fb_payload) + .publish(&fb_payload_with_count) .wrap_err("failed to publish flashblock via websocket")?; self.built_fb_payload_tx - .try_send(fb_payload) + .try_send(fb_payload_with_count) .wrap_err("failed to send built payload to handler")?; *best_payload = (new_payload, bundle_state); diff --git a/crates/builder/src/flashblocks/handler.rs b/crates/builder/src/flashblocks/handler.rs index 9ef9c078..91dd9aaf 100644 --- a/crates/builder/src/flashblocks/handler.rs +++ b/crates/builder/src/flashblocks/handler.rs @@ -5,6 +5,7 @@ use crate::{ cache::FlashblockPayloadsCache, execution::ExecutionInfo, p2p::Message, wspub::WebSocketPublisher, }, + XLayerFlashblockPayload, }, traits::ClientBounds, }; @@ -42,7 +43,7 @@ pub(crate) struct FlashblocksPayloadHandler { // handler context for external flashblock execution ctx: FlashblockHandlerContext, // receives new flashblock payloads built by this builder. - built_fb_payload_rx: mpsc::Receiver, + built_fb_payload_rx: mpsc::Receiver, // receives new full block payloads built by this builder. built_payload_rx: mpsc::Receiver, // receives incoming p2p messages from peers. @@ -72,7 +73,7 @@ where #[allow(clippy::too_many_arguments)] pub(crate) fn new( ctx: FlashblockHandlerContext, - built_fb_payload_rx: mpsc::Receiver, + built_fb_payload_rx: mpsc::Receiver, built_payload_rx: mpsc::Receiver, p2p_rx: mpsc::Receiver, p2p_tx: mpsc::Sender, @@ -187,7 +188,7 @@ where })); } Message::OpFlashblockPayload(fb_payload) => { - if let Err(e) = p2p_cache.add_flashblock_payload(fb_payload.clone()) { + if let Err(e) = p2p_cache.add_flashblock_payload(fb_payload.inner.clone()) { warn!(target: "payload_builder", e = ?e, "failed to add flashblock txs to cache"); } if let Err(e) = ws_pub.publish(&fb_payload) { diff --git a/crates/builder/src/flashblocks/mod.rs b/crates/builder/src/flashblocks/mod.rs index 7aa6c267..e7df91ed 100644 --- a/crates/builder/src/flashblocks/mod.rs +++ b/crates/builder/src/flashblocks/mod.rs @@ -15,11 +15,13 @@ mod context; mod generator; mod handler; mod handler_ctx; +mod payload; mod service; mod timing; pub(crate) mod utils; pub use context::FlashblocksBuilderCtx; +pub use payload::XLayerFlashblockPayload; pub use service::{FlashblocksServiceBuilder, PayloadEventsSender}; pub use utils::{cache::FlashblockPayloadsCache, wspub::WebSocketPublisher}; diff --git a/crates/builder/src/flashblocks/payload.rs b/crates/builder/src/flashblocks/payload.rs new file mode 100644 index 00000000..3349be2a --- /dev/null +++ b/crates/builder/src/flashblocks/payload.rs @@ -0,0 +1,52 @@ +use op_alloy_rpc_types_engine::OpFlashblockPayload; +use serde::{Deserialize, Serialize}; + +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +pub struct XLayerFlashblockPayload { + #[serde(flatten)] + pub inner: OpFlashblockPayload, + /// The target flashblock index that the builder will build until. Default to zero if + /// unset yet, for base flashblock payload. + #[serde(default)] + pub target_index: u64, +} + +impl XLayerFlashblockPayload { + pub fn new(inner: OpFlashblockPayload, target_index: u64) -> Self { + Self { inner, target_index } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_xlayer_payload_serializes_flat() { + let payload = OpFlashblockPayload::default(); + let wrapped = XLayerFlashblockPayload::new(payload.clone(), 7); + let json = serde_json::to_string(&wrapped).unwrap(); + // target_index should appear at top level, not nested + assert!(json.contains("\"target_index\":7")); + // inner fields should also be at top level + assert!(json.contains("\"index\":")); + } + + #[test] + fn test_backwards_compat_old_consumer_ignores_target_index() { + let payload = OpFlashblockPayload::default(); + let wrapped = XLayerFlashblockPayload::new(payload, 7); + let json = serde_json::to_string(&wrapped).unwrap(); + // Old consumer deserializes as OpFlashblockPayload — should succeed + let _: OpFlashblockPayload = serde_json::from_str(&json).unwrap(); + } + + #[test] + fn test_backwards_compat_new_consumer_defaults_target_index() { + let payload = OpFlashblockPayload::default(); + let json = serde_json::to_string(&payload).unwrap(); + // New consumer deserializes as XLayerFlashblockPayload — target_index defaults to 0 + let wrapped: XLayerFlashblockPayload = serde_json::from_str(&json).unwrap(); + assert_eq!(wrapped.target_index, 0); + } +} diff --git a/crates/builder/src/flashblocks/utils/p2p.rs b/crates/builder/src/flashblocks/utils/p2p.rs index 6d616984..afff3c63 100644 --- a/crates/builder/src/flashblocks/utils/p2p.rs +++ b/crates/builder/src/flashblocks/utils/p2p.rs @@ -1,5 +1,5 @@ +use crate::flashblocks::XLayerFlashblockPayload; use alloy_primitives::U256; -use op_alloy_rpc_types_engine::OpFlashblockPayload; use serde::{Deserialize, Serialize}; use reth::{core::primitives::SealedBlock, payload::PayloadId}; @@ -13,7 +13,7 @@ pub(crate) const FLASHBLOCKS_STREAM_PROTOCOL: crate::p2p::StreamProtocol = #[derive(Clone, Debug, PartialEq, Deserialize, Serialize)] pub(crate) enum Message { OpBuiltPayload(OpBuiltPayload), - OpFlashblockPayload(OpFlashblockPayload), + OpFlashblockPayload(XLayerFlashblockPayload), } impl crate::p2p::Message for Message { @@ -39,7 +39,7 @@ impl Message { Message::OpBuiltPayload(value.into()) } - pub(crate) fn from_flashblock_payload(value: OpFlashblockPayload) -> Self { + pub(crate) fn from_flashblock_payload(value: XLayerFlashblockPayload) -> Self { Message::OpFlashblockPayload(value) } } diff --git a/crates/builder/src/flashblocks/utils/wspub.rs b/crates/builder/src/flashblocks/utils/wspub.rs index d3f29e1d..f7871aea 100644 --- a/crates/builder/src/flashblocks/utils/wspub.rs +++ b/crates/builder/src/flashblocks/utils/wspub.rs @@ -1,3 +1,6 @@ +use crate::{ + flashblocks::XLayerFlashblockPayload, metrics::tokio::MonitoredTask, metrics::BuilderMetrics, +}; use core::{ fmt::{Debug, Formatter}, net::SocketAddr, @@ -5,7 +8,6 @@ use core::{ }; use futures::SinkExt; use futures_util::StreamExt; -use op_alloy_rpc_types_engine::OpFlashblockPayload; use std::{io, net::TcpListener, sync::Arc}; use tokio::{ net::TcpStream, @@ -24,8 +26,6 @@ use tokio_tungstenite::{ }; use tracing::{debug, info, trace, warn}; -use crate::{metrics::tokio::MonitoredTask, metrics::BuilderMetrics}; - /// A WebSockets publisher that accepts connections from client websockets and broadcasts to them /// updates about new flashblocks. It maintains a count of sent messages and active subscriptions. /// @@ -65,7 +65,7 @@ impl WebSocketPublisher { Ok(Self { sent, subs, term, pipe, subscriber_limit }) } - pub fn publish(&self, payload: &OpFlashblockPayload) -> io::Result { + pub fn publish(&self, payload: &XLayerFlashblockPayload) -> io::Result { // Serialize the payload to a UTF-8 string // serialize only once, then just copy around only a pointer // to the serialized data for each subscription. @@ -73,9 +73,10 @@ impl WebSocketPublisher { target: "payload_builder", event = "flashblock_sent", message = "Sending flashblock to subscribers", - id = %payload.payload_id, - index = payload.index, - base = payload.base.is_some(), + id = %payload.inner.payload_id, + index = payload.inner.index, + base = payload.inner.base.is_some(), + target_index = payload.target_index, ); let serialized = serde_json::to_string(payload)?; diff --git a/crates/flashblocks/src/cache/mod.rs b/crates/flashblocks/src/cache/mod.rs index 495c147b..7f1c5bc5 100644 --- a/crates/flashblocks/src/cache/mod.rs +++ b/crates/flashblocks/src/cache/mod.rs @@ -284,8 +284,9 @@ impl FlashblockStateCache { pub fn handle_pending_sequence( &self, pending_sequence: PendingSequence, + target_index: u64, ) -> eyre::Result<()> { - self.inner.write().handle_pending_sequence(pending_sequence) + self.inner.write().handle_pending_sequence(pending_sequence, target_index) } /// Handles a canonical block committed to the canonical chainstate. @@ -386,11 +387,14 @@ impl FlashblockStateCacheInner { fn handle_pending_sequence( &mut self, pending_sequence: PendingSequence, + target_index: u64, ) -> eyre::Result<()> { let pending_height = pending_sequence.get_height(); let expected_height = self.confirm_height + 1; - if pending_height == expected_height + 1 { + if (target_index > 0 && pending_sequence.get_last_flashblock_index() >= target_index) + || pending_height == expected_height + 1 + { // Pending tip has advanced — update pending state, and optimistically // commit current pending to confirm cache let sequence = self.pending_cache.take().ok_or_else(|| { diff --git a/crates/flashblocks/src/cache/pending.rs b/crates/flashblocks/src/cache/pending.rs index 15ef2c1d..bc404dad 100644 --- a/crates/flashblocks/src/cache/pending.rs +++ b/crates/flashblocks/src/cache/pending.rs @@ -37,14 +37,16 @@ impl PendingSequence { self.pending.to_block_and_receipts() } - /// Returns the cached transaction info for the given tx hash, if present - /// in the pending sequence. pub fn get_tx_info(&self, tx_hash: &TxHash) -> Option<(CachedTxInfo, BlockAndReceipts)> { self.tx_index .get(tx_hash) .cloned() .map(|tx_info| (tx_info, self.pending.to_block_and_receipts())) } + + pub fn get_last_flashblock_index(&self) -> u64 { + self.prefix_execution_meta.last_flashblock_index + } } #[cfg(test)] diff --git a/crates/flashblocks/src/cache/raw.rs b/crates/flashblocks/src/cache/raw.rs index 6b050004..f89968bf 100644 --- a/crates/flashblocks/src/cache/raw.rs +++ b/crates/flashblocks/src/cache/raw.rs @@ -9,6 +9,8 @@ use op_alloy_rpc_types_engine::{OpFlashblockPayload, OpFlashblockPayloadBase}; use reth_primitives_traits::{Recovered, SignedTransaction}; +use xlayer_builder::flashblocks::XLayerFlashblockPayload; + const MAX_RAW_CACHE_SIZE: usize = 10; /// The raw flashblocks sequence cache for new incoming flashblocks from the sequencer. @@ -35,8 +37,8 @@ impl RawFlashblocksCache { self.inner.write().handle_canonical_height(height); } - pub fn handle_flashblock(&self, flashblock: OpFlashblockPayload) -> eyre::Result<()> { - self.inner.write().handle_flashblock(flashblock) + pub fn handle_flashblock(&self, payload: XLayerFlashblockPayload) -> eyre::Result<()> { + self.inner.write().handle_flashblock(payload) } pub(crate) fn try_get_buildable_args( @@ -71,7 +73,8 @@ impl RawFlashblocksCacheInner { } } - pub fn handle_flashblock(&mut self, flashblock: OpFlashblockPayload) -> eyre::Result<()> { + pub fn handle_flashblock(&mut self, payload: XLayerFlashblockPayload) -> eyre::Result<()> { + let XLayerFlashblockPayload { inner: flashblock, target_index } = payload; let incoming_height = flashblock.block_number(); if incoming_height <= self.canon_height { return Err(eyre::eyre!( @@ -86,12 +89,12 @@ impl RawFlashblocksCacheInner { self.cache.iter_mut().find(|entry| entry.payload_id() == Some(flashblock.payload_id)); if let Some(entry) = existing { - entry.insert_flashblock(flashblock)?; + entry.insert_flashblock(flashblock, target_index)?; } else { // New sequence — push to ring buffer, evicting the oldest entry // when the cache is full. let mut entry = RawFlashblocksEntry::new(); - entry.insert_flashblock(flashblock)?; + entry.insert_flashblock(flashblock, target_index)?; self.cache.enqueue(entry); } Ok(()) @@ -121,6 +124,8 @@ struct RawFlashblocksEntry { recovered_transactions_by_index: BTreeMap>>>, /// Tracks if the accumulated sequence has received the first base flashblock has_base: bool, + /// The sequencer's target flashblock index. Zero if unset. + target_index: u64, } impl RawFlashblocksEntry { @@ -129,11 +134,16 @@ impl RawFlashblocksEntry { payloads: BTreeMap::new(), recovered_transactions_by_index: BTreeMap::new(), has_base: false, + target_index: 0, } } /// Inserts a flashblock into the sequence. - fn insert_flashblock(&mut self, flashblock: OpFlashblockPayload) -> eyre::Result<()> { + fn insert_flashblock( + &mut self, + flashblock: OpFlashblockPayload, + target_index: u64, + ) -> eyre::Result<()> { if !self.can_accept(&flashblock) { return Err(eyre::eyre!( "Incoming flashblock failed to be accepted into the sequence, possible re-org detected: incoming_id={:?}, current_id={:?}, incoming_height={}, current_height={:?}", @@ -147,6 +157,9 @@ impl RawFlashblocksEntry { if flashblock.index == 0 { self.has_base = true; } + if target_index > 0 { + self.target_index = target_index; + } let flashblock_index = flashblock.index; let recovered_txs = flashblock.recover_transactions().collect::, _>>()?; self.payloads.insert(flashblock_index, flashblock); @@ -215,6 +228,7 @@ impl RawFlashblocksEntry { transactions: self.transactions_up_to(best_revision), withdrawals: self.withdrawals_at(best_revision), last_flashblock_index: best_revision, + target_index: self.target_index, }) } @@ -232,6 +246,12 @@ mod tests { type TestRawCache = RawFlashblocksCacheInner; + /// Wraps an [`OpFlashblockPayload`] into an [`XLayerFlashblockPayload`] with + /// `target_index: 0` for tests that don't care about the target count. + fn wrap(fb: OpFlashblockPayload) -> XLayerFlashblockPayload { + XLayerFlashblockPayload::new(fb, 0) + } + #[test] fn test_raw_entry_can_accept_first_flashblock_on_empty_entry() { // Arrange @@ -240,7 +260,7 @@ mod tests { let mut cache = TestRawCache::new(); // Act - let result = cache.handle_flashblock(fb0); + let result = cache.handle_flashblock(wrap(fb0)); // Assert: empty entry accepts anything without error assert!(result.is_ok(), "empty entry should accept first flashblock"); @@ -254,8 +274,8 @@ mod tests { let fb0_dup = factory.flashblock_at(0).build(); let mut cache = TestRawCache::new(); - cache.handle_flashblock(fb0).expect("first flashblock should succeed"); - let result = cache.handle_flashblock(fb0_dup); + cache.handle_flashblock(wrap(fb0)).expect("first flashblock should succeed"); + let result = cache.handle_flashblock(wrap(fb0_dup)); assert!(result.is_err(), "duplicate index within same sequence should be rejected"); } @@ -266,14 +286,14 @@ mod tests { let payload_id = fb0.payload_id; let mut cache = TestRawCache::new(); - cache.handle_flashblock(fb0).expect("first flashblock should succeed"); + cache.handle_flashblock(wrap(fb0)).expect("first flashblock should succeed"); let fb_wrong_block = factory .builder() .index(1) .block_number(999) // different block number .payload_id(payload_id) .build(); - let result = cache.handle_flashblock(fb_wrong_block); + let result = cache.handle_flashblock(wrap(fb_wrong_block)); assert!(result.is_err(), "mismatched block number with same payload_id should be rejected"); assert_eq!(cache.cache.len(), 1, "rejected flashblock should not create a new entry"); } @@ -286,8 +306,8 @@ mod tests { let fb2 = factory.builder().index(2).block_number(100).payload_id(payload_id).build(); let mut cache = TestRawCache::new(); - cache.handle_flashblock(fb0).expect("fb0 insert"); - let result = cache.handle_flashblock(fb2); + cache.handle_flashblock(wrap(fb0)).expect("fb0 insert"); + let result = cache.handle_flashblock(wrap(fb2)); assert!(result.is_ok(), "out-of-order unique index should be accepted"); } @@ -297,7 +317,7 @@ mod tests { let fb1 = factory.builder().index(1).block_number(100).build(); let mut cache = TestRawCache::new(); - cache.handle_flashblock(fb1).expect("fb1 insert"); + cache.handle_flashblock(wrap(fb1)).expect("fb1 insert"); let entry = cache.cache.iter().next().expect("entry should exist"); let best = entry.try_get_best_revision(); assert!(best.is_none(), "get_best_revision should return None without base (index 0)"); @@ -309,7 +329,7 @@ mod tests { let fb0 = factory.flashblock_at(0).build(); let mut cache = TestRawCache::new(); - cache.handle_flashblock(fb0).expect("fb0 insert"); + cache.handle_flashblock(wrap(fb0)).expect("fb0 insert"); let entry = cache.cache.iter().next().expect("entry should exist"); let best = entry.try_get_best_revision(); assert_eq!(best, Some(0), "only index 0 → best revision is 0"); @@ -324,10 +344,10 @@ mod tests { let fb3 = factory.flashblock_after(&fb2).build(); let mut cache = TestRawCache::new(); - cache.handle_flashblock(fb0).expect("fb0"); - cache.handle_flashblock(fb1).expect("fb1"); - cache.handle_flashblock(fb2).expect("fb2"); - cache.handle_flashblock(fb3).expect("fb3"); + cache.handle_flashblock(wrap(fb0)).expect("fb0"); + cache.handle_flashblock(wrap(fb1)).expect("fb1"); + cache.handle_flashblock(wrap(fb2)).expect("fb2"); + cache.handle_flashblock(wrap(fb3)).expect("fb3"); let entry = cache.cache.iter().next().expect("entry should exist"); let best = entry.try_get_best_revision(); assert_eq!(best, Some(3), "consecutive 0..3 → best revision 3"); @@ -342,9 +362,9 @@ mod tests { let fb3 = factory.builder().index(3).block_number(100).payload_id(payload_id).build(); let mut cache = TestRawCache::new(); - cache.handle_flashblock(fb0).expect("fb0"); - cache.handle_flashblock(fb1).expect("fb1"); - cache.handle_flashblock(fb3).expect("fb3 (gap after index 1)"); + cache.handle_flashblock(wrap(fb0)).expect("fb0"); + cache.handle_flashblock(wrap(fb1)).expect("fb1"); + cache.handle_flashblock(wrap(fb3)).expect("fb3 (gap after index 1)"); let entry = cache.cache.iter().next().expect("entry should exist"); let best = entry.try_get_best_revision(); assert_eq!(best, Some(1), "gap between 1 and 3 → best revision is 1"); @@ -357,8 +377,8 @@ mod tests { let fb101 = factory.flashblock_for_next_block(&fb100).build(); let mut cache = TestRawCache::new(); - cache.handle_flashblock(fb100).expect("fb100"); - cache.handle_flashblock(fb101).expect("fb101"); + cache.handle_flashblock(wrap(fb100)).expect("fb100"); + cache.handle_flashblock(wrap(fb101)).expect("fb101"); assert_eq!(cache.cache.len(), 2); cache.handle_canonical_height(100); assert_eq!(cache.cache.len(), 1, "block 100 entry should be evicted"); @@ -375,9 +395,9 @@ mod tests { let fb102 = factory.flashblock_for_next_block(&fb101).build(); let mut cache = TestRawCache::new(); - cache.handle_flashblock(fb100).expect("fb100"); - cache.handle_flashblock(fb101).expect("fb101"); - cache.handle_flashblock(fb102).expect("fb102"); + cache.handle_flashblock(wrap(fb100)).expect("fb100"); + cache.handle_flashblock(wrap(fb101)).expect("fb101"); + cache.handle_flashblock(wrap(fb102)).expect("fb102"); assert_eq!(cache.cache.len(), 3); cache.handle_canonical_height(102); assert_eq!(cache.cache.len(), 0, "all entries at or below height 102 should be evicted"); @@ -390,8 +410,8 @@ mod tests { let fb101 = factory.flashblock_for_next_block(&fb100).build(); let mut cache = TestRawCache::new(); - cache.handle_flashblock(fb100).expect("fb100"); - cache.handle_flashblock(fb101).expect("fb101"); + cache.handle_flashblock(wrap(fb100)).expect("fb100"); + cache.handle_flashblock(wrap(fb101)).expect("fb101"); cache.handle_canonical_height(99); assert_eq!(cache.cache.len(), 2, "no entries should be evicted below their block numbers"); } @@ -403,7 +423,7 @@ mod tests { let mut cache = TestRawCache::new(); cache.handle_canonical_height(100); - let result = cache.handle_flashblock(fb100); + let result = cache.handle_flashblock(wrap(fb100)); assert!(result.is_err(), "flashblock at canonical height should be rejected"); assert_eq!(cache.cache.len(), 0, "flashblock at canonical height should not be inserted"); } @@ -415,10 +435,10 @@ mod tests { let fb0_seq2 = factory.flashblock_for_next_block(&fb0_seq1).build(); let mut cache = TestRawCache::new(); - cache.handle_flashblock(fb0_seq1.clone()).expect("seq1 fb0"); - cache.handle_flashblock(fb0_seq2.clone()).expect("seq2 fb0"); + cache.handle_flashblock(wrap(fb0_seq1.clone())).expect("seq1 fb0"); + cache.handle_flashblock(wrap(fb0_seq2.clone())).expect("seq2 fb0"); let fb1_seq1 = factory.flashblock_after(&fb0_seq1).build(); - cache.handle_flashblock(fb1_seq1).expect("seq1 fb1"); + cache.handle_flashblock(wrap(fb1_seq1)).expect("seq1 fb1"); let entries: Vec<_> = cache.cache.iter().collect(); assert_eq!(entries.len(), 2, "should have two separate entries for two payload_ids"); } @@ -429,12 +449,12 @@ mod tests { let mut prev_fb = factory.flashblock_at(0).build(); let first_block_num = prev_fb.metadata.block_number; let mut cache = TestRawCache::new(); - cache.handle_flashblock(prev_fb.clone()).expect("first fb"); + cache.handle_flashblock(wrap(prev_fb.clone())).expect("first fb"); // Fill up to MAX_RAW_CACHE_SIZE (10) unique sequences for _ in 1..MAX_RAW_CACHE_SIZE { let next_fb = factory.flashblock_for_next_block(&prev_fb).build(); - cache.handle_flashblock(next_fb.clone()).expect("fill fb"); + cache.handle_flashblock(wrap(next_fb.clone())).expect("fill fb"); prev_fb = next_fb; } assert_eq!(cache.cache.len(), MAX_RAW_CACHE_SIZE, "cache should be at max capacity"); @@ -442,7 +462,7 @@ mod tests { // Insert one more sequence to trigger FIFO eviction let overflow_fb = factory.flashblock_for_next_block(&prev_fb).build(); let overflow_block_num = overflow_fb.metadata.block_number; - cache.handle_flashblock(overflow_fb).expect("overflow fb"); + cache.handle_flashblock(wrap(overflow_fb)).expect("overflow fb"); // Assert: cache is still at max size (oldest entry evicted) assert_eq!(cache.cache.len(), MAX_RAW_CACHE_SIZE, "cache size should remain at max"); @@ -466,7 +486,7 @@ mod tests { let expected_block = fb0.metadata.block_number; let mut cache = TestRawCache::new(); - cache.handle_flashblock(fb0).expect("fb0 insert"); + cache.handle_flashblock(wrap(fb0)).expect("fb0 insert"); let entry = cache.cache.iter().next().expect("entry should exist"); assert_eq!(entry.block_number(), Some(expected_block)); } @@ -477,7 +497,7 @@ mod tests { let fb0 = factory.flashblock_at(0).build(); // no transactions set let mut cache = TestRawCache::new(); - cache.handle_flashblock(fb0).expect("fb0 insert"); + cache.handle_flashblock(wrap(fb0)).expect("fb0 insert"); let entry = cache.cache.iter().next().expect("entry should exist"); assert_eq!(entry.transaction_count(), 0, "flashblock with no txs should have count 0"); } @@ -488,7 +508,7 @@ mod tests { let fb0 = factory.flashblock_at(0).build(); let mut cache = TestRawCache::new(); - cache.handle_flashblock(fb0).expect("fb0 insert"); + cache.handle_flashblock(wrap(fb0)).expect("fb0 insert"); let entry = cache.cache.iter().next().expect("entry should exist"); assert!(entry.has_base, "has_base should be true after inserting index 0"); } @@ -499,7 +519,7 @@ mod tests { let fb1 = factory.builder().index(1).block_number(100).build(); let mut cache = TestRawCache::new(); - cache.handle_flashblock(fb1).expect("fb1 insert"); + cache.handle_flashblock(wrap(fb1)).expect("fb1 insert"); let entry = cache.cache.iter().next().expect("entry should exist"); assert!(!entry.has_base, "has_base should be false when only index 1 inserted"); } @@ -510,7 +530,7 @@ mod tests { let fb0 = factory.flashblock_at(0).build(); let cache = RawFlashblocksCache::::new(); - let result = cache.handle_flashblock(fb0); + let result = cache.handle_flashblock(wrap(fb0)); assert!(result.is_ok(), "handle_flashblock via Arc wrapper should succeed"); } @@ -520,7 +540,7 @@ mod tests { let fb1 = factory.builder().index(1).block_number(100).build(); let mut cache = TestRawCache::new(); - cache.handle_flashblock(fb1).expect("fb1 insert"); + cache.handle_flashblock(wrap(fb1)).expect("fb1 insert"); let entry = cache.cache.iter().next().expect("entry should exist"); let best = entry.try_get_best_revision(); // Assert: no base → None, even though index 1 exists @@ -538,8 +558,8 @@ mod tests { factory.builder().index(2).block_number(block_number).payload_id(payload_id).build(); let mut cache = TestRawCache::new(); - cache.handle_flashblock(fb0).expect("fb0"); - cache.handle_flashblock(fb2).expect("fb2"); + cache.handle_flashblock(wrap(fb0)).expect("fb0"); + cache.handle_flashblock(wrap(fb2)).expect("fb2"); let entry = cache.cache.iter().next().expect("entry should exist"); let best = entry.try_get_best_revision(); // Assert: gap immediately after base (index 1 missing) → best revision is 0 @@ -558,14 +578,14 @@ mod tests { ]); let mut cache = TestRawCache::new(); - cache.handle_flashblock(fb0.clone()).expect("fb0 insert"); + cache.handle_flashblock(wrap(fb0.clone())).expect("fb0 insert"); let fb_diff = factory .builder() .index(0) .block_number(fb0.metadata.block_number) .payload_id(different_payload_id) .build(); - let result = cache.handle_flashblock(fb_diff); + let result = cache.handle_flashblock(wrap(fb_diff)); // Assert: new entry created (no error), but we now have 2 entries assert!(result.is_ok(), "different payload_id with same block creates new entry"); assert_eq!( @@ -584,10 +604,10 @@ mod tests { let fb3 = factory.flashblock_after(&fb2).build(); let mut cache = TestRawCache::new(); - cache.handle_flashblock(fb0).expect("fb0"); - cache.handle_flashblock(fb1).expect("fb1"); - cache.handle_flashblock(fb2).expect("fb2"); - cache.handle_flashblock(fb3).expect("fb3"); + cache.handle_flashblock(wrap(fb0)).expect("fb0"); + cache.handle_flashblock(wrap(fb1)).expect("fb1"); + cache.handle_flashblock(wrap(fb2)).expect("fb2"); + cache.handle_flashblock(wrap(fb3)).expect("fb3"); // Assert: all four go into a single entry (same payload_id) assert_eq!( cache.cache.len(), diff --git a/crates/flashblocks/src/execution/mod.rs b/crates/flashblocks/src/execution/mod.rs index 33290798..e80038a8 100644 --- a/crates/flashblocks/src/execution/mod.rs +++ b/crates/flashblocks/src/execution/mod.rs @@ -20,6 +20,7 @@ pub(crate) struct BuildArgs { pub(crate) transactions: I, pub(crate) withdrawals: Vec, pub(crate) last_flashblock_index: u64, + pub(crate) target_index: u64, } /// Cached prefix execution data used to resume canonical builds. diff --git a/crates/flashblocks/src/execution/validator.rs b/crates/flashblocks/src/execution/validator.rs index 85a97da7..add95384 100644 --- a/crates/flashblocks/src/execution/validator.rs +++ b/crates/flashblocks/src/execution/validator.rs @@ -387,6 +387,7 @@ where last_flashblock_index: args.last_flashblock_index, }, block_transaction_count, + args.target_index, )?; Ok(()) @@ -400,6 +401,7 @@ where executed_block: ExecutedBlock, prefix_execution_meta: PrefixExecutionMeta, transaction_count: usize, + target_index: u64, ) -> eyre::Result<()> { let block_hash = executed_block.recovered_block.hash(); let parent_hash = base.parent_hash; @@ -419,17 +421,20 @@ where }, ); } - self.flashblocks_state.handle_pending_sequence(PendingSequence { - // Set pending block deadline to 1 second matching default blocktime. - pending: PendingBlock::with_executed_block( - Instant::now() + Duration::from_secs(1), - executed_block, - ), - prefix_execution_meta, - tx_index, - block_hash, - parent_hash, - }) + self.flashblocks_state.handle_pending_sequence( + PendingSequence { + // Set pending block deadline to 1 second matching default blocktime. + pending: PendingBlock::with_executed_block( + Instant::now() + Duration::from_secs(1), + executed_block, + ), + prefix_execution_meta, + tx_index, + block_hash, + parent_hash, + }, + target_index, + ) } fn prevalidate_incoming_sequence< diff --git a/crates/flashblocks/src/lib.rs b/crates/flashblocks/src/lib.rs index 4628cfda..35953c24 100644 --- a/crates/flashblocks/src/lib.rs +++ b/crates/flashblocks/src/lib.rs @@ -17,8 +17,8 @@ pub use service::{FlashblocksPersistCtx, FlashblocksRpcCtx, FlashblocksRpcServic pub use subscription::FlashblocksPubSub; pub use ws::WsFlashBlockStream; -use op_alloy_rpc_types_engine::OpFlashblockPayload; use std::sync::Arc; +use xlayer_builder::flashblocks::XLayerFlashblockPayload; pub type PendingSequenceRx = tokio::sync::watch::Receiver>>; -pub type ReceivedFlashblocksRx = tokio::sync::broadcast::Receiver>; +pub type ReceivedFlashblocksRx = tokio::sync::broadcast::Receiver>; diff --git a/crates/flashblocks/src/persist.rs b/crates/flashblocks/src/persist.rs index 0ba9fb7a..69b5fb16 100644 --- a/crates/flashblocks/src/persist.rs +++ b/crates/flashblocks/src/persist.rs @@ -19,7 +19,7 @@ pub async fn handle_persistence(mut rx: ReceivedFlashblocksRx, datadir: ChainPat result = rx.recv() => { match result { Ok(flashblock) => { - if let Err(e) = cache.add_flashblock_payload(flashblock.as_ref().clone()) { + if let Err(e) = cache.add_flashblock_payload(flashblock.inner.clone()) { warn!(target: "flashblocks", "Failed to cache flashblock payload: {e}"); continue; } @@ -67,16 +67,16 @@ pub async fn handle_relay_flashblocks( trace!( target: "flashblocks", "Received flashblock: index={}, block_hash={}", - flashblock.index, - flashblock.diff.block_hash + flashblock.inner.index, + flashblock.inner.diff.block_hash ); match ws_pub.publish(&flashblock) { Ok(_) => { trace!( target: "flashblocks", "Published flashblock: index={}, block_hash={}", - flashblock.index, - flashblock.diff.block_hash + flashblock.inner.index, + flashblock.inner.diff.block_hash ); } Err(e) => { diff --git a/crates/flashblocks/src/service.rs b/crates/flashblocks/src/service.rs index ca3009d1..50169c9b 100644 --- a/crates/flashblocks/src/service.rs +++ b/crates/flashblocks/src/service.rs @@ -15,7 +15,7 @@ use tokio::sync::broadcast::Sender; use tracing::*; use alloy_eips::eip2718::Encodable2718; -use op_alloy_rpc_types_engine::{OpFlashblockPayload, OpFlashblockPayloadBase}; +use op_alloy_rpc_types_engine::OpFlashblockPayloadBase; use reth_chain_state::CanonStateNotificationStream; use reth_engine_primitives::TreeConfig; @@ -30,7 +30,7 @@ use reth_tasks::TaskExecutor; use xlayer_builder::{ args::FlashblocksArgs, - flashblocks::{PayloadEventsSender, WebSocketPublisher}, + flashblocks::{PayloadEventsSender, WebSocketPublisher, XLayerFlashblockPayload}, metrics::{tokio::FlashblocksTaskMetrics, BuilderMetrics}, }; @@ -113,7 +113,7 @@ where /// Task executor. task_executor: TaskExecutor, /// Broadcast channel to forward received flashblocks from the subscription. - received_flashblocks_tx: Sender>, + received_flashblocks_tx: Sender>, } impl FlashblocksRpcService @@ -201,7 +201,7 @@ where pub fn spawn_rpc(self, incoming_rx: S) where - S: Stream> + Unpin + Send + 'static, + S: Stream> + Unpin + Send + 'static, { debug!(target: "flashblocks", "Initializing flashblocks rpc"); let raw_cache = Arc::new(RawFlashblocksCache::new()); diff --git a/crates/flashblocks/src/state.rs b/crates/flashblocks/src/state.rs index ecba9811..2886b246 100644 --- a/crates/flashblocks/src/state.rs +++ b/crates/flashblocks/src/state.rs @@ -13,7 +13,7 @@ use tracing::*; use alloy_consensus::BlockHeader; use alloy_eips::eip2718::Encodable2718; -use op_alloy_rpc_types_engine::{OpFlashblockPayload, OpFlashblockPayloadBase}; +use op_alloy_rpc_types_engine::OpFlashblockPayloadBase; use reth_chain_state::CanonStateNotificationStream; use reth_evm::ConfigureEvm; @@ -23,15 +23,17 @@ use reth_provider::{ BlockReader, HashedPostStateProvider, HeaderProvider, StateProviderFactory, StateReader, }; +use xlayer_builder::flashblocks::XLayerFlashblockPayload; + const CONNECTION_BACKOUT_PERIOD: Duration = Duration::from_secs(5); pub async fn handle_incoming_flashblocks( mut incoming_rx: S, - received_tx: Sender>, + received_tx: Sender>, raw_cache: Arc>, task_queue: ExecutionTaskQueue, ) where - S: Stream> + Unpin + Send + 'static, + S: Stream> + Unpin + Send + 'static, N: NodePrimitives, { info!(target: "flashblocks", "Flashblocks raw handle started"); @@ -94,17 +96,17 @@ pub async fn handle_incoming_flashblocks( } fn process_flashblock_payload( - flashblock: OpFlashblockPayload, - received_tx: &tokio::sync::broadcast::Sender>, + payload: XLayerFlashblockPayload, + received_tx: &tokio::sync::broadcast::Sender>, raw_cache: &RawFlashblocksCache, task_queue: &ExecutionTaskQueue, ) -> eyre::Result<()> { if received_tx.receiver_count() > 0 { - let _ = received_tx.send(Arc::new(flashblock.clone())); + let _ = received_tx.send(Arc::new(payload.clone())); } // Insert into raw cache - let height = flashblock.block_number(); - raw_cache.handle_flashblock(flashblock)?; + let height = payload.inner.block_number(); + raw_cache.handle_flashblock(payload)?; // Enqueue to execution tasks let mut queue = diff --git a/crates/flashblocks/src/ws/decoding.rs b/crates/flashblocks/src/ws/decoding.rs index a09ef329..060ca353 100644 --- a/crates/flashblocks/src/ws/decoding.rs +++ b/crates/flashblocks/src/ws/decoding.rs @@ -1,27 +1,25 @@ +use alloy_primitives::bytes::Bytes; use std::io; -use alloy_primitives::bytes::Bytes; -use op_alloy_rpc_types_engine::OpFlashblockPayload; +use xlayer_builder::flashblocks::XLayerFlashblockPayload; /// A trait for decoding flashblocks from bytes. pub trait FlashBlockDecoder: Send + 'static { - /// Decodes `bytes` into a [`OpFlashblockPayload`]. - fn decode(&self, bytes: Bytes) -> eyre::Result; + /// Decodes `bytes` into an [`XLayerFlashblockPayload`]. + fn decode(&self, bytes: Bytes) -> eyre::Result; } /// Default implementation of the decoder. impl FlashBlockDecoder for () { - fn decode(&self, bytes: Bytes) -> eyre::Result { + fn decode(&self, bytes: Bytes) -> eyre::Result { decode_flashblock(bytes) } } -pub(crate) fn decode_flashblock(bytes: Bytes) -> eyre::Result { +pub(crate) fn decode_flashblock(bytes: Bytes) -> eyre::Result { let bytes = crate::ws::decoding::try_parse_message(bytes)?; - - let payload: OpFlashblockPayload = + let payload: XLayerFlashblockPayload = serde_json::from_slice(&bytes).map_err(|e| eyre::eyre!("failed to parse message: {e}"))?; - Ok(payload) } diff --git a/crates/flashblocks/src/ws/stream.rs b/crates/flashblocks/src/ws/stream.rs index 39666833..76c2b21b 100644 --- a/crates/flashblocks/src/ws/stream.rs +++ b/crates/flashblocks/src/ws/stream.rs @@ -18,9 +18,9 @@ use tokio_tungstenite::{ use tracing::debug; use url::Url; -use op_alloy_rpc_types_engine::OpFlashblockPayload; +use xlayer_builder::flashblocks::XLayerFlashblockPayload; -/// An asynchronous stream of [`OpFlashblockPayload`] from a websocket connection. +/// An asynchronous stream of [`XLayerFlashblockPayload`] from a websocket connection. /// /// The stream attempts to connect to a websocket URL and then decode each received item. /// @@ -50,7 +50,7 @@ impl WsFlashBlockStream { } } - /// Sets the [`OpFlashblockPayload`] decoder for the websocket stream. + /// Sets the [`XLayerFlashblockPayload`] decoder for the websocket stream. pub fn with_decoder(self, decoder: Box) -> Self { Self { decoder, ..self } } @@ -77,7 +77,7 @@ where S: Sink + Send + Unpin, C: WsConnect + Clone + Send + 'static + Unpin, { - type Item = eyre::Result; + type Item = eyre::Result; fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { let this = self.get_mut(); @@ -246,6 +246,7 @@ mod tests { use super::*; use alloy_primitives::bytes::Bytes; use brotli::enc::BrotliEncoderParams; + use op_alloy_rpc_types_engine::OpFlashblockPayload; use std::{future, iter}; use tokio_tungstenite::tungstenite::{ protocol::frame::{coding::CloseCode, Frame}, @@ -471,7 +472,8 @@ mod tests { let actual_messages: Vec<_> = stream.take(1).map(Result::unwrap).collect().await; let expected_messages = flashblocks.to_vec(); - + let actual_messages: Vec<_> = actual_messages.iter().map(|m| &m.inner).collect(); + let expected_messages: Vec<_> = expected_messages.iter().collect(); assert_eq!(actual_messages, expected_messages); } @@ -488,7 +490,7 @@ mod tests { let actual_message = stream.next().await.expect("Binary message should not be ignored").unwrap(); - assert_eq!(actual_message, expected_message) + assert_eq!(actual_message.inner, expected_message) } #[tokio::test] From e570398c9d3dcc8223be847083bbfdb1cdbccf0d Mon Sep 17 00:00:00 2001 From: Niven Date: Thu, 26 Mar 2026 18:56:31 +0800 Subject: [PATCH 59/76] fix(flashblocks): fix pending_stale flush by promoting complete sequences to confirm cache MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Split the promotion logic in handle_pending_sequence to handle target_index completion and next-block arrival as separate cases. When target_index is reached for the current block, the complete sequence is promoted directly to the confirm cache and pending is cleared, preventing stale pending detection when the canonical block arrives. Also update validator prevalidation to use confirm_height instead of canon_height for flashblock height checks. 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude Opus 4.6 (1M context) --- crates/flashblocks/src/cache/mod.rs | 34 ++++++++++++------- crates/flashblocks/src/execution/validator.rs | 16 ++++----- 2 files changed, 30 insertions(+), 20 deletions(-) diff --git a/crates/flashblocks/src/cache/mod.rs b/crates/flashblocks/src/cache/mod.rs index 7f1c5bc5..8463e7a2 100644 --- a/crates/flashblocks/src/cache/mod.rs +++ b/crates/flashblocks/src/cache/mod.rs @@ -392,11 +392,24 @@ impl FlashblockStateCacheInner { let pending_height = pending_sequence.get_height(); let expected_height = self.confirm_height + 1; - if (target_index > 0 && pending_sequence.get_last_flashblock_index() >= target_index) - || pending_height == expected_height + 1 - { - // Pending tip has advanced — update pending state, and optimistically - // commit current pending to confirm cache + if pending_height == expected_height { + let incoming_seq = pending_sequence.clone(); + if target_index > 0 && pending_sequence.get_last_flashblock_index() >= target_index { + // Target flashblock. Promote to confirm, and clear pending state + self.handle_confirmed_block( + expected_height, + incoming_seq.pending.executed_block, + incoming_seq.pending.receipts, + )?; + self.pending_cache = None; + } else { + // In-progress — replace pending with newer flashblock + self.pending_cache = Some(incoming_seq); + } + } else if pending_height == expected_height + 1 { + // The next block's flashblock arrived. Somehow target flashblocks was + // missed. Promote current pending to confirm, and set incoming as new + // pending sequence. let sequence = self.pending_cache.take().ok_or_else(|| { eyre::eyre!( "polluted state cache - trying to advance pending tip but no current pending" @@ -408,16 +421,12 @@ impl FlashblockStateCacheInner { sequence.pending.receipts, )?; self.pending_cache = Some(pending_sequence.clone()); - let _ = self.pending_sequence_tx.send(Some(pending_sequence)); - } else if pending_height == expected_height { - // Replace the existing pending sequence - self.pending_cache = Some(pending_sequence.clone()); - let _ = self.pending_sequence_tx.send(Some(pending_sequence)); } else { return Err(eyre::eyre!( "polluted state cache - not next consecutive pending height block" )); } + let _ = self.pending_sequence_tx.send(Some(pending_sequence)); Ok(()) } @@ -516,8 +525,9 @@ impl FlashblockStateCacheInner { /// /// Both failure modes reduce to: every height between `canonical_height + 1` and /// the target must be present in the overlay. This invariant is naturally maintained - /// by `handle_confirmed_block` (rejects non-consecutive heights) and the pending - /// block always being `confirm_height + 1`. + /// by `handle_confirmed_block` (rejects non-consecutive heights). The pending block, + /// if present, sits at `confirm_height + 1`; it may be absent after a complete + /// sequence is promoted directly to the confirm cache via `target_index`. /// /// On validation failure (non-contiguous overlay or gap to canonical), the cache is /// flushed and `None` is returned. diff --git a/crates/flashblocks/src/execution/validator.rs b/crates/flashblocks/src/execution/validator.rs index add95384..6439002d 100644 --- a/crates/flashblocks/src/execution/validator.rs +++ b/crates/flashblocks/src/execution/validator.rs @@ -475,21 +475,21 @@ where // Optimistic fresh build return Ok(None); } - // No pending sequence initialized yet. Validate with canonical chainstate height. - let canon_height = self.flashblocks_state.get_canon_height(); - if canon_height == 0 { + // No pending sequence. Validate with flashblocks state cache highest confirm height + let confirm_height = self.flashblocks_state.get_confirm_height(); + if confirm_height == 0 { return Err(eyre::eyre!( - "canonical height not yet initialized, skipping: incoming={incoming_block_number}" + "confirm height not yet initialized, skipping: incoming={incoming_block_number}" )); } - if incoming_block_number > canon_height + 1 { + if incoming_block_number > confirm_height + 1 { return Err(eyre::eyre!( - "flashblock height too far ahead: incoming={incoming_block_number}, canonical={canon_height}" + "flashblock height too far ahead: incoming={incoming_block_number}, confirm={confirm_height}" )); } - if incoming_block_number <= canon_height { + if incoming_block_number <= confirm_height { return Err(eyre::eyre!( - "stale height: incoming={incoming_block_number}, canonical={canon_height}" + "stale height: incoming={incoming_block_number}, confirm={confirm_height}" )); } Ok(None) From 99d8168f4112cd27c5ec78c272a6a99fe74f7155 Mon Sep 17 00:00:00 2001 From: Niven Date: Fri, 27 Mar 2026 11:47:23 +0800 Subject: [PATCH 60/76] fix(flashblocks): apply cumulative gas offset during suffix execution MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Move the cumulative gas offset application from merge_suffix_results into the per-receipt execution loop so that receipts sent to the incremental root builder already carry the correct cumulative_gas_used. Refactor FlashblockReceipt::add_cumulative_gas_offset to operate on a single receipt instead of a slice. Also improve logging consistency and add target_index to execution trace. 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude Opus 4.6 (1M context) --- crates/flashblocks/src/cache/mod.rs | 10 +++++++-- crates/flashblocks/src/execution/mod.rs | 10 ++++----- crates/flashblocks/src/execution/validator.rs | 22 +++++++++++-------- crates/flashblocks/src/state.rs | 1 + 4 files changed, 26 insertions(+), 17 deletions(-) diff --git a/crates/flashblocks/src/cache/mod.rs b/crates/flashblocks/src/cache/mod.rs index 8463e7a2..e2c28c1b 100644 --- a/crates/flashblocks/src/cache/mod.rs +++ b/crates/flashblocks/src/cache/mod.rs @@ -381,6 +381,12 @@ impl FlashblockStateCacheInner { } self.confirm_height = block_number; self.confirm_cache.insert(block_number, executed_block, receipts)?; + info!( + target: "flashblocks", + confirm_height = self.confirm_height, + canonical_height = self.canon_info.0, + "Committed pending block to confirm flashblocks state cache", + ); Ok(()) } @@ -438,7 +444,7 @@ impl FlashblockStateCacheInner { warn!( target: "flashblocks", canonical_height = canon_info.0, - cache_height = self.confirm_height, + confirm_height = self.confirm_height, canonical_reorg = reorg, pending_stale, "Reorg or pending stale detected on handle canonical block", @@ -448,7 +454,7 @@ impl FlashblockStateCacheInner { debug!( target: "flashblocks", canonical_height = canon_info.0, - cache_height = self.confirm_height, + confirm_height = self.confirm_height, "Evicting flashblocks state inner cache" ); diff --git a/crates/flashblocks/src/execution/mod.rs b/crates/flashblocks/src/execution/mod.rs index e80038a8..ba622c1f 100644 --- a/crates/flashblocks/src/execution/mod.rs +++ b/crates/flashblocks/src/execution/mod.rs @@ -54,18 +54,16 @@ enum StateRootStrategy { /// Receipt requirements for cache-resume flow. pub trait FlashblockReceipt: Clone { /// Adds `gas_offset` to each receipt's `cumulative_gas_used`. - fn add_cumulative_gas_offset(receipts: &mut [Self], gas_offset: u64); + fn add_cumulative_gas_offset(&mut self, gas_offset: u64); } impl FlashblockReceipt for OpReceipt { - fn add_cumulative_gas_offset(receipts: &mut [Self], gas_offset: u64) { + fn add_cumulative_gas_offset(&mut self, gas_offset: u64) { if gas_offset == 0 { return; } - for receipt in receipts { - let inner = receipt.as_receipt_mut(); - inner.cumulative_gas_used = inner.cumulative_gas_used.saturating_add(gas_offset); - } + let inner = self.as_receipt_mut(); + inner.cumulative_gas_used = inner.cumulative_gas_used.saturating_add(gas_offset); } } diff --git a/crates/flashblocks/src/execution/validator.rs b/crates/flashblocks/src/execution/validator.rs index 6439002d..0b06e866 100644 --- a/crates/flashblocks/src/execution/validator.rs +++ b/crates/flashblocks/src/execution/validator.rs @@ -570,7 +570,7 @@ where } // Execute all transactions and finalize - let (executor, suffix_senders) = self.execute_transactions( + let (executor, suffix_senders, suffix_receipts) = self.execute_transactions( executor, pending_sequence, transaction_count, @@ -579,8 +579,9 @@ where )?; drop(receipt_tx); - // Finish execution and get the result + // Finish execution and replace with the generated suffix receipts let (_evm, mut result) = executor.finish().map(|(evm, result)| (evm.into_db(), result))?; + result.receipts = suffix_receipts; if let Some(seq) = pending_sequence { result = Self::merge_suffix_results( &seq.prefix_execution_meta, @@ -611,6 +612,7 @@ where Ok((output, senders, result_rx, read_cache)) } + #[expect(clippy::type_complexity)] fn execute_transactions( &self, mut executor: Executor, @@ -618,7 +620,7 @@ where transaction_count: usize, handle: &mut PayloadHandle, receipt_tx: &crossbeam_channel::Sender>, - ) -> eyre::Result<(Executor, Vec
), BlockExecutionError> + ) -> eyre::Result<(Executor, Vec
, Vec), BlockExecutionError> where T: ExecutableTxFor + ExecutableTxParts< @@ -642,6 +644,7 @@ where }; let mut senders = Vec::with_capacity(transaction_count); + let mut receipts = Vec::new(); let mut transactions = handle.iter_transactions(); // Some executors may execute transactions that do not append receipts during the @@ -649,6 +652,7 @@ where // In that case, invoking the callback on every transaction would resend the previous // receipt with the same index and can panic the ordered root builder. let mut last_sent_len = 0usize; + let prefix_gas_used = pending_sequence.map_or(0, |seq| seq.prefix_execution_meta.gas_used); loop { let Some(tx_result) = transactions.next() else { break }; @@ -663,13 +667,15 @@ where if current_len > last_sent_len { last_sent_len = current_len; // Send the latest receipt to the background task for incremental root computation. - if let Some(receipt) = executor.receipts().last() { + if let Some(mut receipt) = executor.receipts().last().cloned() { let tx_index = receipt_index_offset + current_len - 1; - let _ = receipt_tx.send(IndexedReceipt::new(tx_index, receipt.clone())); + receipt.add_cumulative_gas_offset(prefix_gas_used); + receipts.push(receipt.clone()); + let _ = receipt_tx.send(IndexedReceipt::new(tx_index, receipt)); } } } - Ok((executor, senders)) + Ok((executor, senders, receipts)) } /// Determines the state root computation strategy based on configuration. @@ -860,10 +866,8 @@ where fn merge_suffix_results( cached_prefix: &PrefixExecutionMeta, cached_receipts: Vec, - mut suffix_result: BlockExecutionResult, + suffix_result: BlockExecutionResult, ) -> BlockExecutionResult { - N::Receipt::add_cumulative_gas_offset(&mut suffix_result.receipts, cached_prefix.gas_used); - let mut receipts = cached_receipts; receipts.extend(suffix_result.receipts); diff --git a/crates/flashblocks/src/state.rs b/crates/flashblocks/src/state.rs index 2886b246..fb112e2e 100644 --- a/crates/flashblocks/src/state.rs +++ b/crates/flashblocks/src/state.rs @@ -183,6 +183,7 @@ pub fn handle_execution_tasks( target: "flashblocks", execute_height, last_index = args.last_flashblock_index, + target_index = args.target_index, "Executing flashblocks sequence" ); From 3775a30d97c8744a02b25ca616e7fd4d5542d830 Mon Sep 17 00:00:00 2001 From: Niven Date: Fri, 27 Mar 2026 14:43:33 +0800 Subject: [PATCH 61/76] fix(flashblocks): use pending sequence hash for incremental state provider MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit For incremental builds, resolve the state provider from the pending sequence block hash instead of the parent hash so the overlay trie anchors at the latest executed prefix. Thread the parent SealedHeader into PendingSequence so the EVM env and state root remain consistent across incremental steps. Pass evm_env directly to execute_block to avoid cloning the full ExecutionEnv. 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude Opus 4.6 (1M context) --- crates/flashblocks/src/cache/pending.rs | 10 ++-- crates/flashblocks/src/execution/validator.rs | 49 +++++++++++-------- 2 files changed, 33 insertions(+), 26 deletions(-) diff --git a/crates/flashblocks/src/cache/pending.rs b/crates/flashblocks/src/cache/pending.rs index bc404dad..5d9361ac 100644 --- a/crates/flashblocks/src/cache/pending.rs +++ b/crates/flashblocks/src/cache/pending.rs @@ -5,6 +5,7 @@ use std::collections::HashMap; use alloy_consensus::BlockHeader; use alloy_primitives::{TxHash, B256}; use reth_primitives_traits::NodePrimitives; +use reth_primitives_traits::SealedHeader; use reth_rpc_eth_types::{block::BlockAndReceipts, PendingBlock}; /// The pending flashblocks sequence built with all received `OpFlashblockPayload` @@ -19,7 +20,7 @@ pub struct PendingSequence { /// The current block hash of the latest flashblocks sequence. pub block_hash: B256, /// Parent hash of the built block (may be non-canonical or canonical). - pub parent_hash: B256, + pub parent_header: SealedHeader, /// Prefix execution metadata for incremental builds. pub prefix_execution_meta: PrefixExecutionMeta, } @@ -65,13 +66,12 @@ mod tests { fn make_pending_sequence(block_number: u64) -> PendingSequence { let executed = make_executed_block(block_number, B256::ZERO); let block_hash = executed.recovered_block.hash(); - let parent_hash = executed.recovered_block.parent_hash(); let pending_block = PendingBlock::with_executed_block(Instant::now(), executed); PendingSequence { pending: pending_block, tx_index: HashMap::new(), block_hash, - parent_hash, + parent_header: Default::default(), prefix_execution_meta: Default::default(), } } @@ -82,8 +82,6 @@ mod tests { ) -> PendingSequence { let executed = make_executed_block(block_number, B256::ZERO); let block_hash = executed.recovered_block.hash(); - let parent_hash = executed.recovered_block.parent_hash(); - let mut tx_index = HashMap::new(); for i in 0..tx_count { let tx = mock_tx(i as u64); @@ -104,7 +102,7 @@ mod tests { pending: pending_block, tx_index, block_hash, - parent_hash, + parent_header: Default::default(), prefix_execution_meta: Default::default(), } } diff --git a/crates/flashblocks/src/execution/validator.rs b/crates/flashblocks/src/execution/validator.rs index 0b06e866..9b6d9ffe 100644 --- a/crates/flashblocks/src/execution/validator.rs +++ b/crates/flashblocks/src/execution/validator.rs @@ -36,7 +36,7 @@ use reth_errors::BlockExecutionError; use reth_errors::RethError; use reth_evm::{ execute::{BlockExecutor, ExecutableTxFor}, - ConfigureEvm, Evm, TxEnvFor, + ConfigureEvm, Evm, EvmEnvFor, TxEnvFor, }; use reth_execution_types::{BlockExecutionOutput, BlockExecutionResult}; use reth_optimism_forks::OpHardforks; @@ -159,20 +159,29 @@ where } else { block_transactions.clone() }; - // Get state provider builder of parent hash - let (provider_builder, parent_header, overlay_data) = - self.state_provider_builder(parent_hash)?; + + // Get state provider builder. + // 1. Fresh builds - get the provider builder from parent hash. + // 2. Incremental builds - get provider builder from pending sequence hash. + let hash = pending_sequence.as_ref().map_or(parent_hash, |seq| seq.get_hash()); + let (provider_builder, header, overlay_data) = self.state_provider_builder(hash)?; let mut state_provider = provider_builder.build()?; + // For incremental builds, use the previous index's computed state root so the incremental + // prefix trie nodes (PreservedSparseTrie) are re-used. + let parent_state_root = header.state_root(); + let parent_header = + pending_sequence.as_ref().map_or(header, |seq| seq.parent_header.clone()); + let attrs = args.base.clone().into(); let evm_env = self.evm_config.next_evm_env(&parent_header, &attrs).map_err(RethError::other)?; let execution_env = ExecutionEnv { - evm_env, + evm_env: evm_env.clone(), hash: B256::ZERO, parent_hash, - parent_state_root: parent_header.state_root(), + parent_state_root, transaction_count: transactions.len(), withdrawals: Some(args.withdrawals), }; @@ -192,7 +201,7 @@ where // Create lazy overlay from ancestors - this doesn't block, allowing execution to start // before the trie data is ready. The overlay will be computed on first access. let (lazy_overlay, anchor_hash) = - Self::get_parent_lazy_overlay(overlay_data.as_ref(), parent_hash); + Self::get_parent_lazy_overlay(overlay_data.as_ref(), hash); // Create overlay factory for payload processor (StateRootTask path needs it for // multiproofs) @@ -205,7 +214,7 @@ where // Spawn the appropriate processor based on strategy. let mut handle = self.spawn_payload_processor( - execution_env.clone(), + execution_env, transactions.clone(), provider_builder, overlay_factory.clone(), @@ -225,7 +234,7 @@ where // as transactions complete, allowing parallel computation during execution. let (output, senders, receipt_root_rx, cached_reads) = self.execute_block( state_provider.as_ref(), - execution_env, + evm_env, &parent_header, attrs, transactions, @@ -378,6 +387,7 @@ where self.commit_pending_sequence( args.base, executed_block, + parent_header, PrefixExecutionMeta { payload_id: args.payload_id, cached_reads, @@ -399,16 +409,14 @@ where &self, base: OpFlashblockPayloadBase, executed_block: ExecutedBlock, + parent_header: SealedHeaderFor, prefix_execution_meta: PrefixExecutionMeta, transaction_count: usize, target_index: u64, ) -> eyre::Result<()> { - let block_hash = executed_block.recovered_block.hash(); - let parent_hash = base.parent_hash; - // Build tx index + let block_hash = executed_block.recovered_block.hash(); let mut tx_index = HashMap::with_capacity(transaction_count); - for (idx, tx) in executed_block.recovered_block.transactions_recovered().enumerate() { tx_index.insert( *tx.tx_hash(), @@ -421,6 +429,7 @@ where }, ); } + self.flashblocks_state.handle_pending_sequence( PendingSequence { // Set pending block deadline to 1 second matching default blocktime. @@ -428,10 +437,10 @@ where Instant::now() + Duration::from_secs(1), executed_block, ), - prefix_execution_meta, tx_index, block_hash, - parent_hash, + parent_header, + prefix_execution_meta, }, target_index, ) @@ -506,7 +515,7 @@ where fn execute_block( &mut self, state_provider: &dyn StateProvider, - execution_env: ExecutionEnv, + evm_env: EvmEnvFor, parent_header: &SealedHeaderFor, attrs: EvmConfig::NextBlockEnvCtx, transactions: Vec>>, @@ -543,7 +552,7 @@ where db.set_state_clear_flag(true); } - let evm = self.evm_config.evm_with_env(&mut db, execution_env.evm_env); + let evm = self.evm_config.evm_with_env(&mut db, evm_env); let execution_ctx = self .evm_config .context_for_next_block(parent_header, attrs) @@ -933,13 +942,13 @@ where /// block hash (the highest persisted ancestor). This allows execution to start immediately /// while the trie input computation is deferred until the overlay is actually needed. /// - /// If parent is on disk (no in-memory blocks), returns `(None, parent_hash)`. + /// If parent is on disk (no in-memory blocks), returns `(None, tip_hash)`. fn get_parent_lazy_overlay( overlay_data: Option<&(Vec>, B256)>, - parent_hash: B256, + tip_hash: B256, ) -> (Option, B256) { let Some((blocks, anchor)) = overlay_data else { - return (None, parent_hash); + return (None, tip_hash); }; let anchor_hash = *anchor; From 811eb5d62f5fac717d71a3231647c1a7ae320ce1 Mon Sep 17 00:00:00 2001 From: Niven Date: Fri, 27 Mar 2026 17:15:21 +0800 Subject: [PATCH 62/76] fix(flashblocks): flatten multi-transition reverts for incremental builds MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Incremental builds accumulate one revert entry per flashblock index via merge_transitions, but the engine persistence service expects a single revert per block. Flatten all revert transitions by keeping the earliest (parent-state) account info per address and merging storage slot reverts across transitions, preserving wipe_storage from later indices. 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude Opus 4.6 (1M context) --- crates/flashblocks/src/execution/validator.rs | 37 ++++++++++++++++++- 1 file changed, 35 insertions(+), 2 deletions(-) diff --git a/crates/flashblocks/src/execution/validator.rs b/crates/flashblocks/src/execution/validator.rs index 9b6d9ffe..db102927 100644 --- a/crates/flashblocks/src/execution/validator.rs +++ b/crates/flashblocks/src/execution/validator.rs @@ -51,7 +51,13 @@ use reth_provider::{ use reth_revm::{ cached::CachedReads, database::StateProviderDatabase, - db::{states::bundle_state::BundleRetention, State}, + db::{ + states::{ + bundle_state::BundleRetention, + reverts::{AccountRevert, Reverts}, + }, + State, + }, }; use reth_rpc_eth_types::PendingBlock; use reth_tasks::Runtime; @@ -612,9 +618,36 @@ where // Explicitly drop db to release the mutable borrow on read_cache held via cached_db, // allowing read_cache to be moved into the return value. - let bundle = db.take_bundle(); + let mut bundle = db.take_bundle(); drop(db); + // For incremental builds, the bundle accumulates one revert entry per flashblock + // index (from with_bundle_prestate + merge_transitions at each index). The engine + // persistence service expects a single revert entry per block. Flatten all revert + // transitions into one: + // - Keep the earliest (parent-state) account info revert per address + // - Merge storage reverts across transitions (earliest per slot via or_insert) + if pending_sequence.is_some() && bundle.reverts.len() > 1 { + let mut reverts_map = HashMap::::new(); + for reverts in bundle.reverts.iter() { + for (addr, new_revert) in reverts { + if let Some(revert_entry) = reverts_map.get_mut(addr) { + // Merge new storage slots from later transitions and keep the + // earliest value per slot (parent-state revert entry). + for (slot, slot_revert) in &new_revert.storage { + revert_entry.storage.entry(*slot).or_insert(*slot_revert); + } + // Propagate wipe_storage if any transition triggers it, such as + // SELFDESTRUCT in a later flashblock index. + revert_entry.wipe_storage |= new_revert.wipe_storage; + } else { + reverts_map.insert(*addr, new_revert.clone()); + } + } + } + bundle.reverts = Reverts::new(vec![reverts_map.into_iter().collect()]); + } + let output = BlockExecutionOutput { result, state: bundle }; debug!(target: "flashblocks::validator", "Executed block"); From bb05216cfaee4be26dedaaaae58e43fe44379e08 Mon Sep 17 00:00:00 2001 From: Niven Date: Fri, 27 Mar 2026 17:56:50 +0800 Subject: [PATCH 63/76] feat(flashblocks): add debug state comparison between flashblocks and engine MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add a debug mode (--xlayer.flashblocks-debug-state-comparison) that compares ExecutedBlock bundle states, reverts, and trie data between the flashblocks RPC cache and the engine canonical state on each new canonical block. Also add a flag to disable pre-warming so the engine computes payloads independently for accurate comparison. The heavy comparison runs on a blocking thread to avoid stalling the canonical stream handler. 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude Opus 4.6 (1M context) --- bin/node/src/args.rs | 16 +++ bin/node/src/main.rs | 5 +- crates/flashblocks/src/cache/mod.rs | 2 +- crates/flashblocks/src/debug.rs | 153 ++++++++++++++++++++++++++++ crates/flashblocks/src/lib.rs | 1 + crates/flashblocks/src/service.rs | 3 + crates/flashblocks/src/state.rs | 11 ++ 7 files changed, 189 insertions(+), 2 deletions(-) create mode 100644 crates/flashblocks/src/debug.rs diff --git a/bin/node/src/args.rs b/bin/node/src/args.rs index 826fc989..0aadc8ca 100644 --- a/bin/node/src/args.rs +++ b/bin/node/src/args.rs @@ -259,6 +259,22 @@ pub struct FlashblocksRpcArgs { default_value = "1000" )] pub flashblocks_subscription_max_addresses: usize, + + /// Enable flashblocks RPC state comparison debug mode + #[arg( + long = "xlayer.flashblocks-debug-state-comparison", + help = "Enable flashblocks RPC state comparison debug mode", + default_value = "false" + )] + pub flashblocks_debug_state_comparison: bool, + + /// Disable flashblocks RPC pre-warming engine state + #[arg( + long = "xlayer.flashblocks-disable-pre-warming", + help = "Disable flashblocks RPC pre-warming engine state", + default_value = "false" + )] + pub flashblocks_disable_pre_warming: bool, } #[cfg(test)] diff --git a/bin/node/src/main.rs b/bin/node/src/main.rs index d0af1113..02a04bd6 100644 --- a/bin/node/src/main.rs +++ b/bin/node/src/main.rs @@ -164,13 +164,16 @@ fn main() { evm_config: OpEvmConfig::optimism(ctx.provider().chain_spec()), chain_spec: ctx.provider().chain_spec(), tree_config, + debug_state_comparison: args.xlayer_args.flashblocks_rpc.flashblocks_debug_state_comparison, }, FlashblocksPersistCtx { datadir, relay_flashblocks: args.rollup_args.flashblocks_url.is_some(), }, )?; - service.spawn_prewarm(events_sender); + if !args.xlayer_args.flashblocks_rpc.flashblocks_disable_pre_warming { + service.spawn_prewarm(events_sender); + } service.spawn_persistence()?; service.spawn_rpc(WsFlashBlockStream::new(flashblock_url)); info!(target: "reth::cli", "xlayer flashblocks service initialized"); diff --git a/crates/flashblocks/src/cache/mod.rs b/crates/flashblocks/src/cache/mod.rs index e2c28c1b..c8ed85ee 100644 --- a/crates/flashblocks/src/cache/mod.rs +++ b/crates/flashblocks/src/cache/mod.rs @@ -65,7 +65,7 @@ pub struct CachedTxInfo { pub struct FlashblockStateCache { inner: Arc>>, changeset_cache: ChangesetCache, - canon_in_memory_state: CanonicalInMemoryState, + pub(crate) canon_in_memory_state: CanonicalInMemoryState, } // FlashblockStateCache read interfaces diff --git a/crates/flashblocks/src/debug.rs b/crates/flashblocks/src/debug.rs new file mode 100644 index 00000000..8f4a06e8 --- /dev/null +++ b/crates/flashblocks/src/debug.rs @@ -0,0 +1,153 @@ +use crate::cache::FlashblockStateCache; +use reth_chain_state::ExecutedBlock; +use reth_primitives_traits::NodePrimitives; +use tracing::{debug, info, warn}; + +/// Captures the flashblocks and engine `ExecutedBlock`s synchronously (cheap Arc clones), +/// then spawns the heavy comparison on a blocking thread to avoid stalling the canonical +/// stream handler. +pub(crate) fn debug_compare_flashblocks_bundle_states( + flashblocks_state: &FlashblockStateCache, + block_number: u64, + block_hash: alloy_primitives::B256, +) { + // Capture data synchronously (before handle_canonical_block evicts the cache). + // These are cheap — ExecutedBlock internals are Arc'd. + let fb_block = flashblocks_state.get_pending_sequence().and_then(|seq| { + (seq.get_height() == block_number).then(|| seq.pending.executed_block.clone()) + }); + let engine_block = flashblocks_state + .canon_in_memory_state + .state_by_hash(block_hash) + .map(|state| state.block()); + + // Spawn the heavy comparison on a blocking thread so the canonical stream handler + // stays responsive. trie_data() is synchronous (parking_lot::Mutex, no async). + tokio::task::spawn_blocking(move || { + compare_executed_blocks(fb_block, engine_block, block_number); + }); +} + +/// Performs the deep comparison between flashblocks and engine `ExecutedBlock`s. +fn compare_executed_blocks( + fb_block: Option>, + engine_block: Option>, + block_number: u64, +) { + let (Some(fb), Some(eng)) = (fb_block, engine_block) else { + debug!( + target: "flashblocks::verify", + block_number, + "Skipping BundleState comparison (block not available in both caches)" + ); + return; + }; + + let fb_hash = fb.recovered_block.hash(); + let eng_hash = eng.recovered_block.hash(); + + let fb_bundle = &fb.execution_output.state; + let eng_bundle = &eng.execution_output.state; + + // Deep compare accounts: match by address, compare BundleAccount fields + let mut account_mismatches = Vec::new(); + let mut fb_only = Vec::new(); + let mut eng_only = Vec::new(); + for (addr, fb_acct) in &fb_bundle.state { + if let Some(eng_acct) = eng_bundle.state.get(addr) { + if fb_acct != eng_acct { + account_mismatches.push(*addr); + } + } else { + fb_only.push(*addr); + } + } + for addr in eng_bundle.state.keys() { + if !fb_bundle.state.contains_key(addr) { + eng_only.push(*addr); + } + } + + // Deep compare reverts: both should have exactly 1 entry after flattening. + // Match by address within each revert vec. + let mut revert_mismatches = Vec::new(); + let mut revert_fb_only = Vec::new(); + let mut revert_eng_only = Vec::new(); + if fb_bundle.reverts.len() == eng_bundle.reverts.len() { + for (fb_rev, eng_rev) in fb_bundle.reverts.iter().zip(eng_bundle.reverts.iter()) { + let fb_map: std::collections::HashMap<_, _> = + fb_rev.iter().map(|(a, r)| (a, r)).collect(); + let eng_map: std::collections::HashMap<_, _> = + eng_rev.iter().map(|(a, r)| (a, r)).collect(); + for (addr, fb_r) in &fb_map { + if let Some(eng_r) = eng_map.get(addr) { + if fb_r != eng_r { + revert_mismatches.push(**addr); + } + } else { + revert_fb_only.push(**addr); + } + } + for addr in eng_map.keys() { + if !fb_map.contains_key(addr) { + revert_eng_only.push(**addr); + } + } + } + } + + // Deep compare DeferredTrieData (hashed_state + trie_updates) + let fb_trie = fb.trie_data(); + let eng_trie = eng.trie_data(); + let hashed_state_match = *fb_trie.hashed_state == *eng_trie.hashed_state; + let trie_updates_match = *fb_trie.trie_updates == *eng_trie.trie_updates; + + let all_match = fb_hash == eng_hash + && account_mismatches.is_empty() + && fb_only.is_empty() + && eng_only.is_empty() + && fb_bundle.reverts.len() == eng_bundle.reverts.len() + && revert_mismatches.is_empty() + && revert_fb_only.is_empty() + && revert_eng_only.is_empty() + && hashed_state_match + && trie_updates_match; + + if all_match { + info!( + target: "flashblocks::verify", + block_number, + %fb_hash, + accounts = fb_bundle.state.len(), + reverts = fb_bundle.reverts.len(), + "BundleState + TrieData MATCH: flashblocks == engine" + ); + } else { + warn!( + target: "flashblocks::verify", + block_number, + fb_hash = %fb_hash, + eng_hash = %eng_hash, + hash_match = fb_hash == eng_hash, + fb_accounts = fb_bundle.state.len(), + eng_accounts = eng_bundle.state.len(), + account_mismatches = account_mismatches.len(), + fb_only_accounts = fb_only.len(), + eng_only_accounts = eng_only.len(), + fb_reverts = fb_bundle.reverts.len(), + eng_reverts = eng_bundle.reverts.len(), + revert_mismatches = revert_mismatches.len(), + revert_fb_only = revert_fb_only.len(), + revert_eng_only = revert_eng_only.len(), + hashed_state_match, + trie_updates_match, + "BundleState MISMATCH: flashblocks != engine" + ); + for addr in account_mismatches.iter().take(3) { + warn!(target: "flashblocks::verify", %addr, "Account state mismatch"); + } + for addr in revert_mismatches.iter().take(3) { + warn!(target: "flashblocks::verify", %addr, "Revert mismatch"); + } + } +} diff --git a/crates/flashblocks/src/lib.rs b/crates/flashblocks/src/lib.rs index 35953c24..f249e7d0 100644 --- a/crates/flashblocks/src/lib.rs +++ b/crates/flashblocks/src/lib.rs @@ -1,6 +1,7 @@ //! X-Layer flashblocks crate. mod cache; +mod debug; mod execution; mod persist; mod state; diff --git a/crates/flashblocks/src/service.rs b/crates/flashblocks/src/service.rs index 50169c9b..a0d080d9 100644 --- a/crates/flashblocks/src/service.rs +++ b/crates/flashblocks/src/service.rs @@ -86,6 +86,8 @@ pub struct FlashblocksRpcCtx pub chain_spec: Arc, /// Node engine tree configuration for the sequence validator. pub tree_config: TreeConfig, + /// Flashblocks RPC debug mode to enable state comparison. + pub debug_state_comparison: bool, } /// Context for handling flashblocks persistence and relaying. @@ -242,6 +244,7 @@ where self.flashblocks_state, raw_cache, task_queue, + self.rpc_ctx.debug_state_comparison, )), ); } diff --git a/crates/flashblocks/src/state.rs b/crates/flashblocks/src/state.rs index fb112e2e..7b956db1 100644 --- a/crates/flashblocks/src/state.rs +++ b/crates/flashblocks/src/state.rs @@ -1,5 +1,6 @@ use crate::{ cache::RawFlashblocksCache, + debug::debug_compare_flashblocks_bundle_states, execution::validator::FlashblockSequenceValidator, execution::{FlashblockReceipt, OverlayProviderFactory}, service::{ExecutionTaskQueue, ExecutionTaskQueueFlush, EXECUTION_TASK_QUEUE_CAPACITY}, @@ -203,6 +204,7 @@ pub async fn handle_canonical_stream( flashblocks_state: FlashblockStateCache, raw_cache: Arc>, task_queue: ExecutionTaskQueue, + debug_state_comparison: bool, ) { info!(target: "flashblocks", "Canonical state handler started"); while let Some(notification) = canon_rx.next().await { @@ -211,6 +213,15 @@ pub async fn handle_canonical_stream( let block_number = tip.number(); let is_reorg = notification.reverted().is_some(); + // Debug mode - state comparison between flashblocks RPC generated execution state vs + // engine payload validator's execution state (`BundleState` + reverts). + // + // Note that engine pre-warming must also be disabled so engine payload validator will + // compute the new payload independently. + if debug_state_comparison { + debug_compare_flashblocks_bundle_states(&flashblocks_state, block_number, block_hash); + } + raw_cache.handle_canonical_height(block_number); if flashblocks_state.handle_canonical_block((block_number, block_hash), is_reorg) { task_queue.flush(); From fb5a042d7424b11cdc2c4392cf65e70c9e397ceb Mon Sep 17 00:00:00 2001 From: Niven Date: Fri, 27 Mar 2026 18:37:05 +0800 Subject: [PATCH 64/76] refactor(flashblocks): add get_executed_block_by_number for debug lookup MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Expose a unified lookup across pending and confirm caches for ExecutedBlock by block number, simplifying the debug comparison to a single call instead of inline pending sequence matching. 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude Opus 4.6 (1M context) --- crates/flashblocks/src/cache/confirm.rs | 8 ++++++++ crates/flashblocks/src/cache/mod.rs | 15 +++++++++++++++ crates/flashblocks/src/debug.rs | 4 +--- 3 files changed, 24 insertions(+), 3 deletions(-) diff --git a/crates/flashblocks/src/cache/confirm.rs b/crates/flashblocks/src/cache/confirm.rs index ee1ca102..0ab4a0b2 100644 --- a/crates/flashblocks/src/cache/confirm.rs +++ b/crates/flashblocks/src/cache/confirm.rs @@ -135,6 +135,14 @@ impl ConfirmCache { self.blocks.get(&block_number).map(|(_, entry)| entry.to_block_and_receipts()) } + /// Returns the `ExecutedBlock` for the given block number, if present. + pub(crate) fn get_executed_block_by_number( + &self, + block_number: u64, + ) -> Option> { + self.blocks.get(&block_number).map(|(_, entry)| entry.executed_block.clone()) + } + /// Returns the cached transaction info for the given tx hash, if present. pub(crate) fn get_tx_info( &self, diff --git a/crates/flashblocks/src/cache/mod.rs b/crates/flashblocks/src/cache/mod.rs index c8ed85ee..aabaefe8 100644 --- a/crates/flashblocks/src/cache/mod.rs +++ b/crates/flashblocks/src/cache/mod.rs @@ -92,6 +92,21 @@ impl FlashblockStateCache { self.inner.read().confirm_height } + /// Returns the `ExecutedBlock` for the given block number from pending or confirm cache. + /// Used for diagnostic comparison with the engine's execution. + pub fn get_executed_block_by_number( + &self, + block_number: u64, + ) -> Option> { + let guard = self.inner.read(); + if let Some(seq) = guard.pending_cache.as_ref() + && seq.get_height() == block_number + { + return Some(seq.pending.executed_block.clone()); + } + guard.confirm_cache.get_executed_block_by_number(block_number) + } + /// Return the current canonical height, if any. pub fn get_canon_height(&self) -> u64 { self.inner.read().canon_info.0 diff --git a/crates/flashblocks/src/debug.rs b/crates/flashblocks/src/debug.rs index 8f4a06e8..c8822bee 100644 --- a/crates/flashblocks/src/debug.rs +++ b/crates/flashblocks/src/debug.rs @@ -13,9 +13,7 @@ pub(crate) fn debug_compare_flashblocks_bundle_states Date: Fri, 27 Mar 2026 18:38:49 +0800 Subject: [PATCH 65/76] Refactor --- crates/flashblocks/src/cache/confirm.rs | 16 ++++++------- crates/flashblocks/src/cache/mod.rs | 30 ++++++++++++------------- 2 files changed, 23 insertions(+), 23 deletions(-) diff --git a/crates/flashblocks/src/cache/confirm.rs b/crates/flashblocks/src/cache/confirm.rs index 0ab4a0b2..731c510a 100644 --- a/crates/flashblocks/src/cache/confirm.rs +++ b/crates/flashblocks/src/cache/confirm.rs @@ -135,14 +135,6 @@ impl ConfirmCache { self.blocks.get(&block_number).map(|(_, entry)| entry.to_block_and_receipts()) } - /// Returns the `ExecutedBlock` for the given block number, if present. - pub(crate) fn get_executed_block_by_number( - &self, - block_number: u64, - ) -> Option> { - self.blocks.get(&block_number).map(|(_, entry)| entry.executed_block.clone()) - } - /// Returns the cached transaction info for the given tx hash, if present. pub(crate) fn get_tx_info( &self, @@ -191,6 +183,14 @@ impl ConfirmCache { .collect()) } + /// Returns the `ExecutedBlock` for the given block number, if present. + pub(crate) fn get_executed_block_by_number( + &self, + block_number: u64, + ) -> Option> { + self.blocks.get(&block_number).map(|(_, entry)| entry.executed_block.clone()) + } + /// Removes all tx index entries for the transactions in the given block. fn remove_tx_index_for_block(&mut self, block: &ConfirmedBlock) { for tx in block.executed_block.recovered_block.body().transactions() { diff --git a/crates/flashblocks/src/cache/mod.rs b/crates/flashblocks/src/cache/mod.rs index aabaefe8..8483d9c8 100644 --- a/crates/flashblocks/src/cache/mod.rs +++ b/crates/flashblocks/src/cache/mod.rs @@ -92,21 +92,6 @@ impl FlashblockStateCache { self.inner.read().confirm_height } - /// Returns the `ExecutedBlock` for the given block number from pending or confirm cache. - /// Used for diagnostic comparison with the engine's execution. - pub fn get_executed_block_by_number( - &self, - block_number: u64, - ) -> Option> { - let guard = self.inner.read(); - if let Some(seq) = guard.pending_cache.as_ref() - && seq.get_height() == block_number - { - return Some(seq.pending.executed_block.clone()); - } - guard.confirm_cache.get_executed_block_by_number(block_number) - } - /// Return the current canonical height, if any. pub fn get_canon_height(&self) -> u64 { self.inner.read().canon_info.0 @@ -279,6 +264,21 @@ impl FlashblockStateCache { } Ok(Some((overlay, header.expect("valid cached header"), anchor_hash))) } + + /// Returns the `ExecutedBlock` for the given block number from pending or confirm cache. + /// Used for diagnostic comparison with the engine's execution. + pub fn get_executed_block_by_number( + &self, + block_number: u64, + ) -> Option> { + let guard = self.inner.read(); + if let Some(seq) = guard.pending_cache.as_ref() + && seq.get_height() == block_number + { + return Some(seq.pending.executed_block.clone()); + } + guard.confirm_cache.get_executed_block_by_number(block_number) + } } // FlashblockStateCache state mutation interfaces. From a36bca793a14a33806df007009b32de00d1e6920 Mon Sep 17 00:00:00 2001 From: Niven Date: Fri, 27 Mar 2026 19:15:14 +0800 Subject: [PATCH 66/76] refactor(flashblocks): remove trie data comparison from debug output MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Drop DeferredTrieData comparison (hashed_state + trie_updates) from the debug state comparison since trie data is computed independently by the engine and not meaningful for execution output equivalence checks. 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude Opus 4.6 (1M context) --- crates/flashblocks/src/debug.rs | 16 +++------------- 1 file changed, 3 insertions(+), 13 deletions(-) diff --git a/crates/flashblocks/src/debug.rs b/crates/flashblocks/src/debug.rs index c8822bee..eaf6a17b 100644 --- a/crates/flashblocks/src/debug.rs +++ b/crates/flashblocks/src/debug.rs @@ -94,12 +94,6 @@ fn compare_executed_blocks( } } - // Deep compare DeferredTrieData (hashed_state + trie_updates) - let fb_trie = fb.trie_data(); - let eng_trie = eng.trie_data(); - let hashed_state_match = *fb_trie.hashed_state == *eng_trie.hashed_state; - let trie_updates_match = *fb_trie.trie_updates == *eng_trie.trie_updates; - let all_match = fb_hash == eng_hash && account_mismatches.is_empty() && fb_only.is_empty() @@ -107,9 +101,7 @@ fn compare_executed_blocks( && fb_bundle.reverts.len() == eng_bundle.reverts.len() && revert_mismatches.is_empty() && revert_fb_only.is_empty() - && revert_eng_only.is_empty() - && hashed_state_match - && trie_updates_match; + && revert_eng_only.is_empty(); if all_match { info!( @@ -118,7 +110,7 @@ fn compare_executed_blocks( %fb_hash, accounts = fb_bundle.state.len(), reverts = fb_bundle.reverts.len(), - "BundleState + TrieData MATCH: flashblocks == engine" + "Execution output MATCH: flashblocks == engine" ); } else { warn!( @@ -137,9 +129,7 @@ fn compare_executed_blocks( revert_mismatches = revert_mismatches.len(), revert_fb_only = revert_fb_only.len(), revert_eng_only = revert_eng_only.len(), - hashed_state_match, - trie_updates_match, - "BundleState MISMATCH: flashblocks != engine" + "Execution output MISMATCH: flashblocks != engine" ); for addr in account_mismatches.iter().take(3) { warn!(target: "flashblocks::verify", %addr, "Account state mismatch"); From e42e44f9a25973909956835901ddbc9a850c1812 Mon Sep 17 00:00:00 2001 From: Niven Date: Fri, 27 Mar 2026 19:44:00 +0800 Subject: [PATCH 67/76] Better logging --- crates/flashblocks/src/execution/validator.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/flashblocks/src/execution/validator.rs b/crates/flashblocks/src/execution/validator.rs index db102927..953ddbac 100644 --- a/crates/flashblocks/src/execution/validator.rs +++ b/crates/flashblocks/src/execution/validator.rs @@ -482,7 +482,7 @@ where let pending_last_index = pending.prefix_execution_meta.last_flashblock_index; if pending_last_index >= incoming_last_index { return Err(eyre::eyre!( - "flashblock index mismatch: incoming={incoming_last_index}, pending={pending_last_index}" + "skipping, flashblock index already validated: incoming={incoming_last_index}, pending={pending_last_index}" )); } return Ok(Some(pending)); From 60a1346505da128ed9d2d0b9a9bbff9d8bd08e67 Mon Sep 17 00:00:00 2001 From: Niven Date: Mon, 30 Mar 2026 14:02:14 +0800 Subject: [PATCH 68/76] fix(flashblocks): restore hashed_state comparison and send empty trie_updates to engine MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Re-add hashed_state comparison to debug output since it validates the incremental BundleState produces the same hashed diff as fresh execution — critical for pre-warm correctness. Send empty trie_updates in the pre-warm payload instead of the incremental PreservedSparseTrie output, which produces incomplete trie_updates missing prefix leaf changes folded into pruned branches. The engine computes its own trie data fresh from hashed_state. 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude Opus 4.6 (1M context) --- crates/flashblocks/src/debug.rs | 11 ++++++++++- crates/flashblocks/src/service.rs | 6 +++++- 2 files changed, 15 insertions(+), 2 deletions(-) diff --git a/crates/flashblocks/src/debug.rs b/crates/flashblocks/src/debug.rs index eaf6a17b..9744556c 100644 --- a/crates/flashblocks/src/debug.rs +++ b/crates/flashblocks/src/debug.rs @@ -94,6 +94,13 @@ fn compare_executed_blocks( } } + // Compare hashed_state (the state diff input to trie computation). + // This confirms the incremental BundleState produces the same hashed diff + // as a fresh execution — critical since we send hashed_state to the engine pre-warm. + let fb_trie = fb.trie_data(); + let eng_trie = eng.trie_data(); + let hashed_state_match = *fb_trie.hashed_state == *eng_trie.hashed_state; + let all_match = fb_hash == eng_hash && account_mismatches.is_empty() && fb_only.is_empty() @@ -101,7 +108,8 @@ fn compare_executed_blocks( && fb_bundle.reverts.len() == eng_bundle.reverts.len() && revert_mismatches.is_empty() && revert_fb_only.is_empty() - && revert_eng_only.is_empty(); + && revert_eng_only.is_empty() + && hashed_state_match; if all_match { info!( @@ -129,6 +137,7 @@ fn compare_executed_blocks( revert_mismatches = revert_mismatches.len(), revert_fb_only = revert_fb_only.len(), revert_eng_only = revert_eng_only.len(), + hashed_state_match, "Execution output MISMATCH: flashblocks != engine" ); for addr in account_mismatches.iter().take(3) { diff --git a/crates/flashblocks/src/service.rs b/crates/flashblocks/src/service.rs index 59c0abed..e8c144ff 100644 --- a/crates/flashblocks/src/service.rs +++ b/crates/flashblocks/src/service.rs @@ -279,7 +279,11 @@ where recovered_block: executed.recovered_block.clone(), execution_output: executed.execution_output.clone(), hashed_state: Either::Right(trie_data.hashed_state), - trie_updates: Either::Right(trie_data.trie_updates), + // Send empty trie_updates to avoid polluting the engine's overlay trie + // state. The incremental PreservedSparseTrie produces incomplete + // trie_updates (missing prefix leaf changes folded into pruned branches). + // The engine will compute its own trie data fresh from hashed_state. + trie_updates: Either::Right(Arc::default()), }; // Use default zero id — to avoid accumulating stale entries in the engine state tree. let payload = OpBuiltPayload::::new( From 5e8dd9cfc3f03a0e21f658f5d84a59ba3b0e2479 Mon Sep 17 00:00:00 2001 From: Niven Date: Mon, 30 Mar 2026 15:12:06 +0800 Subject: [PATCH 69/76] fix(flashblocks): check incoming canon hash to ensure confirm states matches MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude Opus 4.6 (1M context) --- crates/flashblocks/src/cache/mod.rs | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/crates/flashblocks/src/cache/mod.rs b/crates/flashblocks/src/cache/mod.rs index 8483d9c8..845ef06c 100644 --- a/crates/flashblocks/src/cache/mod.rs +++ b/crates/flashblocks/src/cache/mod.rs @@ -454,7 +454,9 @@ impl FlashblockStateCacheInner { fn handle_canonical_block(&mut self, canon_info: (u64, B256), reorg: bool) -> bool { let pending_stale = self.pending_cache.as_ref().is_some_and(|p| p.get_height() <= canon_info.0); - let flush = pending_stale || reorg; + let hash_mismatch = self.confirm_cache.number_for_hash(&canon_info.1).is_none() + && self.confirm_cache.get_block_by_number(canon_info.0).is_some(); + let flush = pending_stale || hash_mismatch || reorg; if flush { warn!( target: "flashblocks", @@ -462,6 +464,7 @@ impl FlashblockStateCacheInner { confirm_height = self.confirm_height, canonical_reorg = reorg, pending_stale, + hash_mismatch, "Reorg or pending stale detected on handle canonical block", ); self.flush(); @@ -472,7 +475,6 @@ impl FlashblockStateCacheInner { confirm_height = self.confirm_height, "Evicting flashblocks state inner cache" ); - self.confirm_cache.flush_up_to_height(canon_info.0); } // Update state heights From bb8aa3ad9a6af249896388968099f230bb6a2382 Mon Sep 17 00:00:00 2001 From: Niven Date: Mon, 30 Mar 2026 15:31:18 +0800 Subject: [PATCH 70/76] chore(flashblocks): improve execution validator logging MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude Opus 4.6 (1M context) --- crates/flashblocks/src/execution/validator.rs | 28 ++++++++++++++----- 1 file changed, 21 insertions(+), 7 deletions(-) diff --git a/crates/flashblocks/src/execution/validator.rs b/crates/flashblocks/src/execution/validator.rs index 953ddbac..ed342aec 100644 --- a/crates/flashblocks/src/execution/validator.rs +++ b/crates/flashblocks/src/execution/validator.rs @@ -197,6 +197,7 @@ where debug!( target: "flashblocks::validator", + execute_height = args.base.block_number, ?strategy, "Decided which state root algorithm to run" ); @@ -280,6 +281,7 @@ where .inspect_err(|_| { tracing::error!( target: "flashblocks::validator", + execute_height = args.base.block_number, "Receipt root task dropped sender without result, receipt root calculation likely aborted" ); })? @@ -296,19 +298,20 @@ where let mut maybe_state_root = None; match strategy { StateRootStrategy::StateRootTask => { - debug!(target: "flashblocks::validator", "Using sparse trie state root algorithm"); + debug!(target: "flashblocks::validator", execute_height = args.base.block_number, "Using sparse trie state root algorithm"); let task_result = self.await_state_root_with_timeout( &mut handle, overlay_factory.clone(), &hashed_state, + args.base.block_number, )?; match task_result { Ok(StateRootComputeOutcome { state_root, trie_updates }) => { let elapsed = root_time.elapsed(); maybe_state_root = Some((state_root, trie_updates)); - info!(target: "flashblocks::validator", ?state_root, ?elapsed, "State root task finished"); + info!(target: "flashblocks::validator", execute_height = args.base.block_number, ?state_root, ?elapsed, "State root task finished"); } Err(error) => { debug!(target: "flashblocks::validator", %error, "State root task failed"); @@ -316,12 +319,13 @@ where } } StateRootStrategy::Parallel => { - debug!(target: "flashblocks::validator", "Using parallel state root algorithm"); + debug!(target: "flashblocks::validator", execute_height = args.base.block_number, "Using parallel state root algorithm"); match self.compute_state_root_parallel(overlay_factory.clone(), &hashed_state) { Ok(result) => { let elapsed = root_time.elapsed(); info!( target: "flashblocks::validator", + execute_height = args.base.block_number, regular_state_root = ?result.0, ?elapsed, "Regular root task finished" @@ -329,7 +333,7 @@ where maybe_state_root = Some((result.0, result.1)); } Err(error) => { - debug!(target: "flashblocks::validator", %error, "Parallel state root computation failed"); + debug!(target: "flashblocks::validator", execute_height = args.base.block_number, err = %error, "Parallel state root computation failed"); } } } @@ -343,7 +347,7 @@ where maybe_state_root } else { // fallback is to compute the state root regularly in sync - warn!(target: "flashblocks::validator", "Failed to compute state root"); + warn!(target: "flashblocks::validator", execute_height = args.base.block_number, "Failed to compute state root"); let (root, updates) = Self::compute_state_root_serial(overlay_factory.clone(), &hashed_state)?; (root, updates) @@ -536,6 +540,7 @@ where where T: ExecutableTxFor + ExecutableTxParts, N::SignedTx>, Err: core::error::Error + Send + Sync + 'static, + N::SignedTx: TxHashRef, EvmConfig: ConfigureEvm + Unpin> + 'static, { @@ -585,12 +590,14 @@ where } // Execute all transactions and finalize + let execute_height = parent_header.number() + 1; let (executor, suffix_senders, suffix_receipts) = self.execute_transactions( executor, pending_sequence, transaction_count, handle, &receipt_tx, + execute_height, )?; drop(receipt_tx); @@ -649,7 +656,7 @@ where } let output = BlockExecutionOutput { result, state: bundle }; - debug!(target: "flashblocks::validator", "Executed block"); + debug!(target: "flashblocks::validator", execute_height, "Executed block"); Ok((output, senders, result_rx, read_cache)) } @@ -662,6 +669,7 @@ where transaction_count: usize, handle: &mut PayloadHandle, receipt_tx: &crossbeam_channel::Sender>, + execute_height: u64, ) -> eyre::Result<(Executor, Vec
, Vec), BlockExecutionError> where T: ExecutableTxFor @@ -671,6 +679,7 @@ where >, Executor: BlockExecutor, Err: core::error::Error + Send + Sync + 'static, + N::SignedTx: TxHashRef, EvmConfig: ConfigureEvm + Unpin> + 'static, { @@ -702,7 +711,7 @@ where let tx_signer = *tx.signer(); senders.push(tx_signer); - trace!(target: "flashblocks::validator", "Executing transaction"); + trace!(target: "flashblocks::validator", execute_height, txhash = %tx.tx().tx_hash(), "Executing transaction"); executor.execute_transaction(tx)?; let current_len = executor.receipts().len(); @@ -786,6 +795,7 @@ where handle: &mut PayloadHandle, overlay_factory: OverlayStateProviderFactory, hashed_state: &HashedPostState, + execute_height: u64, ) -> eyre::Result> { let Some(timeout) = self.tree_config.state_root_task_timeout() else { return Ok(handle.state_root()); @@ -801,6 +811,7 @@ where Err(RecvTimeoutError::Timeout) => { warn!( target: "flashblocks::validator", + execute_height, ?timeout, "State root task timed out, spawning sequential fallback" ); @@ -823,6 +834,7 @@ where debug!( target: "flashblocks::validator", source = "task", + execute_height, "State root timeout race won" ); return Ok(result); @@ -830,6 +842,7 @@ where Err(RecvTimeoutError::Disconnected) => { debug!( target: "flashblocks::validator", + execute_height, "State root task dropped, waiting for sequential fallback" ); let result = seq_rx.recv().map_err(|_| { @@ -847,6 +860,7 @@ where debug!( target: "flashblocks::validator", source = "sequential", + execute_height, "State root timeout race won" ); let (state_root, trie_updates) = result?; From 6042ba7d581193247eb64fa7892a9899094e7b02 Mon Sep 17 00:00:00 2001 From: Niven Date: Mon, 30 Mar 2026 17:22:28 +0800 Subject: [PATCH 71/76] fix(flashblocks): fix state trie updates MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude Opus 4.6 (1M context) --- crates/flashblocks/src/cache/mod.rs | 6 ++-- crates/flashblocks/src/cache/pending.rs | 8 +++++ crates/flashblocks/src/execution/mod.rs | 3 ++ crates/flashblocks/src/execution/validator.rs | 36 +++++++++++-------- crates/flashblocks/src/service.rs | 20 +++++++---- 5 files changed, 49 insertions(+), 24 deletions(-) diff --git a/crates/flashblocks/src/cache/mod.rs b/crates/flashblocks/src/cache/mod.rs index 845ef06c..fdd99867 100644 --- a/crates/flashblocks/src/cache/mod.rs +++ b/crates/flashblocks/src/cache/mod.rs @@ -299,9 +299,8 @@ impl FlashblockStateCache { pub fn handle_pending_sequence( &self, pending_sequence: PendingSequence, - target_index: u64, ) -> eyre::Result<()> { - self.inner.write().handle_pending_sequence(pending_sequence, target_index) + self.inner.write().handle_pending_sequence(pending_sequence) } /// Handles a canonical block committed to the canonical chainstate. @@ -408,14 +407,13 @@ impl FlashblockStateCacheInner { fn handle_pending_sequence( &mut self, pending_sequence: PendingSequence, - target_index: u64, ) -> eyre::Result<()> { let pending_height = pending_sequence.get_height(); let expected_height = self.confirm_height + 1; if pending_height == expected_height { let incoming_seq = pending_sequence.clone(); - if target_index > 0 && pending_sequence.get_last_flashblock_index() >= target_index { + if pending_sequence.is_target_flashblock() { // Target flashblock. Promote to confirm, and clear pending state self.handle_confirmed_block( expected_height, diff --git a/crates/flashblocks/src/cache/pending.rs b/crates/flashblocks/src/cache/pending.rs index 5d9361ac..619250e9 100644 --- a/crates/flashblocks/src/cache/pending.rs +++ b/crates/flashblocks/src/cache/pending.rs @@ -23,6 +23,8 @@ pub struct PendingSequence { pub parent_header: SealedHeader, /// Prefix execution metadata for incremental builds. pub prefix_execution_meta: PrefixExecutionMeta, + /// Target index of the latest flashblock in the sequence. + pub target_index: u64, } impl PendingSequence { @@ -48,6 +50,10 @@ impl PendingSequence { pub fn get_last_flashblock_index(&self) -> u64 { self.prefix_execution_meta.last_flashblock_index } + + pub fn is_target_flashblock(&self) -> bool { + self.target_index > 0 && self.get_last_flashblock_index() >= self.target_index + } } #[cfg(test)] @@ -73,6 +79,7 @@ mod tests { block_hash, parent_header: Default::default(), prefix_execution_meta: Default::default(), + target_index: 0, } } @@ -104,6 +111,7 @@ mod tests { block_hash, parent_header: Default::default(), prefix_execution_meta: Default::default(), + target_index: 0, } } diff --git a/crates/flashblocks/src/execution/mod.rs b/crates/flashblocks/src/execution/mod.rs index ba622c1f..24b5bd6a 100644 --- a/crates/flashblocks/src/execution/mod.rs +++ b/crates/flashblocks/src/execution/mod.rs @@ -13,6 +13,7 @@ use reth_provider::{ StageCheckpointReader, StorageChangeSetReader, StorageSettingsCache, }; use reth_revm::cached::CachedReads; +use reth_trie::updates::TrieUpdates; pub(crate) struct BuildArgs { pub(crate) base: OpFlashblockPayloadBase, @@ -38,6 +39,8 @@ pub struct PrefixExecutionMeta { pub(crate) blob_gas_used: u64, /// The last flashblock index of the latest flashblocks sequence. pub(crate) last_flashblock_index: u64, + /// Accumulated trie updates across sequence incremental executions. + pub(crate) accumulated_trie_updates: TrieUpdates, } /// Strategy describing how to compute the state root. diff --git a/crates/flashblocks/src/execution/validator.rs b/crates/flashblocks/src/execution/validator.rs index ed342aec..77371ae4 100644 --- a/crates/flashblocks/src/execution/validator.rs +++ b/crates/flashblocks/src/execution/validator.rs @@ -174,7 +174,8 @@ where let mut state_provider = provider_builder.build()?; // For incremental builds, use the previous index's computed state root so the incremental - // prefix trie nodes (PreservedSparseTrie) are re-used. + // prefix trie nodes (PreservedSparseTrie) are re-used, to ensure SR calculation is only + // done on suffix changes and optimized. let parent_state_root = header.state_root(); let parent_header = pending_sequence.as_ref().map_or(header, |seq| seq.parent_header.clone()); @@ -357,6 +358,13 @@ where let prefix_gas_used = output.result.gas_used; let prefix_blob_gas_used = output.result.blob_gas_used; + // Accumulate trie_updates across sequence incremental executions. + let mut accumulated_trie_updates = pending_sequence + .as_ref() + .map(|seq| seq.prefix_execution_meta.accumulated_trie_updates.clone()) + .unwrap_or_default(); + accumulated_trie_updates.extend(trie_output.clone()); + // Assemble the block using pre-computed roots (avoids recomputation). let block = assemble_flashblock( self.chain_spec.as_ref(), @@ -383,6 +391,7 @@ where block, output, hashed_state, + // Only pass prefix trie updates to the deferred trie task trie_output, overlay_data, overlay_factory, @@ -405,6 +414,7 @@ where gas_used: prefix_gas_used, blob_gas_used: prefix_blob_gas_used, last_flashblock_index: args.last_flashblock_index, + accumulated_trie_updates, }, block_transaction_count, args.target_index, @@ -440,20 +450,18 @@ where ); } - self.flashblocks_state.handle_pending_sequence( - PendingSequence { - // Set pending block deadline to 1 second matching default blocktime. - pending: PendingBlock::with_executed_block( - Instant::now() + Duration::from_secs(1), - executed_block, - ), - tx_index, - block_hash, - parent_header, - prefix_execution_meta, - }, + self.flashblocks_state.handle_pending_sequence(PendingSequence { + // Set pending block deadline to 1 second matching default blocktime. + pending: PendingBlock::with_executed_block( + Instant::now() + Duration::from_secs(1), + executed_block, + ), + tx_index, + block_hash, + parent_header, + prefix_execution_meta, target_index, - ) + }) } fn prevalidate_incoming_sequence< diff --git a/crates/flashblocks/src/service.rs b/crates/flashblocks/src/service.rs index e8c144ff..ec611f8d 100644 --- a/crates/flashblocks/src/service.rs +++ b/crates/flashblocks/src/service.rs @@ -268,22 +268,30 @@ where use reth_payload_builder_primitives::Events; while pending_rx.changed().await.is_ok() { - let Some(pending_sequence) = pending_rx.borrow_and_update().clone() else { + let Some(pending_sequence) = pending_rx + .borrow_and_update() + .clone() + .filter(|s| s.is_target_flashblock()) + else { continue; }; let executed = &pending_sequence.executed_block; let block = executed.recovered_block.clone_sealed_block(); let trie_data = executed.trie_data(); + // Use accumulated trie_updates from all incremental sequence executions. + let accumulated_trie_updates = Arc::new( + pending_sequence + .prefix_execution_meta + .accumulated_trie_updates + .clone() + .into_sorted(), + ); let built = reth_payload_primitives::BuiltPayloadExecutedBlock:: { recovered_block: executed.recovered_block.clone(), execution_output: executed.execution_output.clone(), hashed_state: Either::Right(trie_data.hashed_state), - // Send empty trie_updates to avoid polluting the engine's overlay trie - // state. The incremental PreservedSparseTrie produces incomplete - // trie_updates (missing prefix leaf changes folded into pruned branches). - // The engine will compute its own trie data fresh from hashed_state. - trie_updates: Either::Right(Arc::default()), + trie_updates: Either::Right(accumulated_trie_updates), }; // Use default zero id — to avoid accumulating stale entries in the engine state tree. let payload = OpBuiltPayload::::new( From 34ab520248ea6106c82dc31080463904bca5949f Mon Sep 17 00:00:00 2001 From: Niven Date: Mon, 30 Mar 2026 17:51:50 +0800 Subject: [PATCH 72/76] feat(flashblocks): add debug state comparison for accumulated trie updates MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude Opus 4.6 (1M context) --- crates/flashblocks/src/debug.rs | 29 ++++++++++++++++++++++++++++- 1 file changed, 28 insertions(+), 1 deletion(-) diff --git a/crates/flashblocks/src/debug.rs b/crates/flashblocks/src/debug.rs index 9744556c..781deb55 100644 --- a/crates/flashblocks/src/debug.rs +++ b/crates/flashblocks/src/debug.rs @@ -18,11 +18,16 @@ pub(crate) fn debug_compare_flashblocks_bundle_states( fb_block: Option>, engine_block: Option>, block_number: u64, + fb_trie_updates: Option, ) { let (Some(fb), Some(eng)) = (fb_block, engine_block) else { debug!( @@ -147,4 +153,25 @@ fn compare_executed_blocks( warn!(target: "flashblocks::verify", %addr, "Revert mismatch"); } } + + // Compare accumulated trie_updates (merged across all flashblock indices) with + // the engine's fresh trie_updates. This validates that the incremental accumulation + // via TrieUpdates::extend() produces the same result as a fresh single-pass computation. + let Some(fb_updates) = fb_trie_updates else { + return; + }; + let fb_sorted = fb_updates.into_sorted(); + if fb_sorted == *eng_trie.trie_updates { + info!( + target: "flashblocks::verify", + block_number, + "Trie updates MATCH: flashblocks == engine" + ); + } else { + warn!( + target: "flashblocks::verify", + block_number, + "Trie updates MISMATCH: flashblocks != engine" + ); + } } From 09c6b9d67c496a976f749464906c0cc986a36646 Mon Sep 17 00:00:00 2001 From: Niven Date: Mon, 30 Mar 2026 18:37:37 +0800 Subject: [PATCH 73/76] feat(flashblocks): add debug state comparison for accumulated trie updates MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude Opus 4.6 (1M context) --- crates/flashblocks/src/debug.rs | 19 +++++++------ crates/flashblocks/src/state.rs | 48 ++++++++++++++++++++++++++++----- 2 files changed, 52 insertions(+), 15 deletions(-) diff --git a/crates/flashblocks/src/debug.rs b/crates/flashblocks/src/debug.rs index 781deb55..01d4e9f0 100644 --- a/crates/flashblocks/src/debug.rs +++ b/crates/flashblocks/src/debug.rs @@ -10,6 +10,7 @@ pub(crate) fn debug_compare_flashblocks_bundle_states, block_number: u64, block_hash: alloy_primitives::B256, + mut fb_trie_updates: Option, ) { // Capture data synchronously (before handle_canonical_block evicts the cache). // These are cheap — ExecutedBlock internals are Arc'd. @@ -18,11 +19,14 @@ pub(crate) fn debug_compare_flashblocks_bundle_states( fb_block: Option>, engine_block: Option>, block_number: u64, - fb_trie_updates: Option, + fb_trie_updates: Option, ) { let (Some(fb), Some(eng)) = (fb_block, engine_block) else { debug!( @@ -160,8 +164,7 @@ fn compare_executed_blocks( let Some(fb_updates) = fb_trie_updates else { return; }; - let fb_sorted = fb_updates.into_sorted(); - if fb_sorted == *eng_trie.trie_updates { + if fb_updates == *eng_trie.trie_updates { info!( target: "flashblocks::verify", block_number, diff --git a/crates/flashblocks/src/state.rs b/crates/flashblocks/src/state.rs index e1bd1157..543a6cb1 100644 --- a/crates/flashblocks/src/state.rs +++ b/crates/flashblocks/src/state.rs @@ -206,20 +206,54 @@ pub async fn handle_canonical_stream( task_queue: ExecutionTaskQueue, debug_state_comparison: bool, ) { + let mut trie_updates = None; + let mut pending_rx = if debug_state_comparison { + Some(flashblocks_state.subscribe_pending_sequence()) + } else { + None + }; + info!(target: "flashblocks", "Canonical state handler started"); - while let Some(notification) = canon_rx.next().await { + loop { + // Use select! to race canonical notifications with pending sequence updates. + // Pending sequence updates are only processed in debug mode to capture + // accumulated trie_updates before the block is promoted to confirm. + let notification = if let Some(ref mut rx) = pending_rx { + tokio::select! { + result = canon_rx.next() => { + match result { + Some(notification) => notification, + None => break, + } + }, + Ok(()) = rx.changed() => { + if let Some(seq) = rx.borrow_and_update().as_ref() + .filter(|s| s.is_target_flashblock()) + { + trie_updates = Some(seq.prefix_execution_meta.accumulated_trie_updates.clone().into_sorted()); + } + continue; + } + } + } else { + match canon_rx.next().await { + Some(n) => n, + None => break, + } + }; + let tip = notification.tip(); let block_hash = tip.hash(); let block_number = tip.number(); let is_reorg = notification.reverted().is_some(); - // Debug mode - state comparison between flashblocks RPC generated execution state vs - // engine payload validator's execution state (`BundleState` + reverts). - // - // Note that engine pre-warming must also be disabled so engine payload validator will - // compute the new payload independently. if debug_state_comparison { - debug_compare_flashblocks_bundle_states(&flashblocks_state, block_number, block_hash); + debug_compare_flashblocks_bundle_states( + &flashblocks_state, + block_number, + block_hash, + trie_updates.take(), + ); } raw_cache.handle_canonical_height(block_number); From a8146939ee3e0ea5cb9dbabbb7a153f5d748405f Mon Sep 17 00:00:00 2001 From: Niven Date: Mon, 30 Mar 2026 18:59:16 +0800 Subject: [PATCH 74/76] chore(flashblocks): remove pre-warm flag MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude Opus 4.6 (1M context) --- bin/node/src/args.rs | 8 -------- bin/node/src/main.rs | 2 +- crates/flashblocks/src/debug.rs | 6 ++++-- crates/flashblocks/src/state.rs | 4 ++-- 4 files changed, 7 insertions(+), 13 deletions(-) diff --git a/bin/node/src/args.rs b/bin/node/src/args.rs index 0aadc8ca..b5df985a 100644 --- a/bin/node/src/args.rs +++ b/bin/node/src/args.rs @@ -267,14 +267,6 @@ pub struct FlashblocksRpcArgs { default_value = "false" )] pub flashblocks_debug_state_comparison: bool, - - /// Disable flashblocks RPC pre-warming engine state - #[arg( - long = "xlayer.flashblocks-disable-pre-warming", - help = "Disable flashblocks RPC pre-warming engine state", - default_value = "false" - )] - pub flashblocks_disable_pre_warming: bool, } #[cfg(test)] diff --git a/bin/node/src/main.rs b/bin/node/src/main.rs index 4df95ac0..73cbf77c 100644 --- a/bin/node/src/main.rs +++ b/bin/node/src/main.rs @@ -171,7 +171,7 @@ fn main() { relay_flashblocks: args.rollup_args.flashblocks_url.is_some(), }, )?; - if !args.xlayer_args.flashblocks_rpc.flashblocks_disable_pre_warming { + if !args.xlayer_args.flashblocks_rpc.flashblocks_debug_state_comparison { service.spawn_prewarm(events_sender); } service.spawn_persistence()?; diff --git a/crates/flashblocks/src/debug.rs b/crates/flashblocks/src/debug.rs index 01d4e9f0..e73ecc2c 100644 --- a/crates/flashblocks/src/debug.rs +++ b/crates/flashblocks/src/debug.rs @@ -21,10 +21,12 @@ pub(crate) fn debug_compare_flashblocks_bundle_states( if let Some(seq) = rx.borrow_and_update().as_ref() .filter(|s| s.is_target_flashblock()) { - trie_updates = Some(seq.prefix_execution_meta.accumulated_trie_updates.clone().into_sorted()); + trie_updates = Some((seq.get_height(), seq.prefix_execution_meta.accumulated_trie_updates.clone().into_sorted())); } continue; } @@ -252,7 +252,7 @@ pub async fn handle_canonical_stream( &flashblocks_state, block_number, block_hash, - trie_updates.take(), + trie_updates.take().filter(|t| t.0 == block_number).map(|t| t.1), ); } From 84612778a15b124df52574de05febe3936e3235e Mon Sep 17 00:00:00 2001 From: Niven Date: Mon, 30 Mar 2026 19:15:10 +0800 Subject: [PATCH 75/76] fix(flashblocks): fix debug compare MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude Opus 4.6 (1M context) --- crates/flashblocks/src/cache/mod.rs | 3 ++- crates/flashblocks/src/debug.rs | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/crates/flashblocks/src/cache/mod.rs b/crates/flashblocks/src/cache/mod.rs index fdd99867..1b05efbb 100644 --- a/crates/flashblocks/src/cache/mod.rs +++ b/crates/flashblocks/src/cache/mod.rs @@ -267,13 +267,14 @@ impl FlashblockStateCache { /// Returns the `ExecutedBlock` for the given block number from pending or confirm cache. /// Used for diagnostic comparison with the engine's execution. - pub fn get_executed_block_by_number( + pub fn debug_get_executed_block_by_number( &self, block_number: u64, ) -> Option> { let guard = self.inner.read(); if let Some(seq) = guard.pending_cache.as_ref() && seq.get_height() == block_number + && seq.is_target_flashblock() { return Some(seq.pending.executed_block.clone()); } diff --git a/crates/flashblocks/src/debug.rs b/crates/flashblocks/src/debug.rs index e73ecc2c..ce03d0f5 100644 --- a/crates/flashblocks/src/debug.rs +++ b/crates/flashblocks/src/debug.rs @@ -14,7 +14,7 @@ pub(crate) fn debug_compare_flashblocks_bundle_states Date: Mon, 30 Mar 2026 20:39:33 +0800 Subject: [PATCH 76/76] fix: Add committing pending state to confirm on canon block MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude Opus 4.6 (1M context) --- crates/flashblocks/src/cache/mod.rs | 26 ++++++++++++++++++++++---- 1 file changed, 22 insertions(+), 4 deletions(-) diff --git a/crates/flashblocks/src/cache/mod.rs b/crates/flashblocks/src/cache/mod.rs index 1b05efbb..ff52ef21 100644 --- a/crates/flashblocks/src/cache/mod.rs +++ b/crates/flashblocks/src/cache/mod.rs @@ -307,8 +307,8 @@ impl FlashblockStateCache { /// Handles a canonical block committed to the canonical chainstate. /// /// This method will flush the confirm cache up to the canonical block height and - /// the pending state if it matches the committed block to ensure flashblocks state - /// cache memory does not grow unbounded. + /// commits the pending state to the confirm cache if it matches the committed block. + /// This ensures that the flashblocks state cache memory does not grow unbounded. /// /// It also detects chainstate re-orgs (set with re-org arg flag) and flashblocks /// state cache pollution. By default once error is detected, we will automatically @@ -427,8 +427,8 @@ impl FlashblockStateCacheInner { self.pending_cache = Some(incoming_seq); } } else if pending_height == expected_height + 1 { - // The next block's flashblock arrived. Somehow target flashblocks was - // missed. Promote current pending to confirm, and set incoming as new + // The next block's flashblock arrived. The target flashblocks was missed on + // the builder. Promote current pending to confirm, and set incoming as new // pending sequence. let sequence = self.pending_cache.take().ok_or_else(|| { eyre::eyre!( @@ -451,6 +451,24 @@ impl FlashblockStateCacheInner { } fn handle_canonical_block(&mut self, canon_info: (u64, B256), reorg: bool) -> bool { + // If the pending sequence matches the canonical block exactly, the target flashblocks + // was missed on the builder. Promote the current pending to confirm, and set the + // pending state to none. + if let Some(sequence) = self.pending_cache.as_ref() + && sequence.get_height() == canon_info.0 + && sequence.get_hash() == canon_info.1 + && canon_info.0 == self.confirm_height + 1 + { + let sequence = self.pending_cache.take().expect("just confirmed is_some"); + if let Err(e) = self.handle_confirmed_block( + canon_info.0, + sequence.pending.executed_block, + sequence.pending.receipts, + ) { + warn!(target: "flashblocks", err = %e, "Canonical block handle failed to promote pending sequence to confirm"); + } + } + let pending_stale = self.pending_cache.as_ref().is_some_and(|p| p.get_height() <= canon_info.0); let hash_mismatch = self.confirm_cache.number_for_hash(&canon_info.1).is_none()