diff --git a/Cargo.lock b/Cargo.lock index 4d8f3cb7..dfc6099b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -728,9 +728,9 @@ dependencies = [ [[package]] name = "alloy-serde" -version = "1.7.3" +version = "1.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2ce1e0dbf7720eee747700e300c99aac01b1a95bb93f493a01e78ee28bb1a37" +checksum = "9e6d631f8b975229361d8af7b2c749af31c73b3cf1352f90e144ddb06227105e" dependencies = [ "alloy-primitives", "arbitrary", @@ -8114,7 +8114,7 @@ dependencies = [ "reth-node-api", "reth-primitives-traits", "reth-tracing", - "ringbuffer 0.16.0", + "ringbuffer", "serde", "serde_json", "tokio", @@ -9597,7 +9597,7 @@ dependencies = [ "reth-rpc-eth-types", "reth-storage-api", "reth-tasks", - "ringbuffer 0.16.0", + "ringbuffer", "serde_json", "tokio", "tokio-tungstenite 0.28.0", @@ -11072,12 +11072,6 @@ dependencies = [ "windows-sys 0.52.0", ] -[[package]] -name = "ringbuffer" -version = "0.15.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3df6368f71f205ff9c33c076d170dd56ebf68e8161c733c0caa07a7a5509ed53" - [[package]] name = "ringbuffer" version = "0.16.0" @@ -13020,9 +13014,9 @@ dependencies = [ [[package]] name = "unicode-width" -version = "0.2.2" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b4ac048d71ede7ee76d585517add45da530660ef4390e49b098733c6e897f254" +checksum = "1fc81956842c57dac11422a97c3b8195a1ff727f06e85c84ed2e8aa277c9a0fd" [[package]] name = "unicode-xid" @@ -14163,6 +14157,7 @@ dependencies = [ "reth-optimism-storage", "reth-optimism-txpool", "reth-payload-builder", + "reth-payload-builder-primitives", "reth-payload-primitives", "reth-payload-util", "reth-primitives", @@ -14245,7 +14240,9 @@ name = "xlayer-flashblocks" version = "0.1.0" dependencies = [ "alloy-consensus", + "alloy-eip7928", "alloy-eips", + "alloy-evm", "alloy-json-rpc", "alloy-network", "alloy-primitives", @@ -14254,7 +14251,9 @@ dependencies = [ 
"alloy-signer-local", "async-trait", "brotli", + "crossbeam-channel", "derive_more", + "either", "eyre", "futures", "futures-util", @@ -14264,8 +14263,12 @@ dependencies = [ "op-alloy-consensus", "op-alloy-rpc-types-engine", "op-revm", + "parking_lot", "reth-chain-state", + "reth-chainspec", + "reth-db-models", "reth-engine-primitives", + "reth-engine-tree", "reth-errors", "reth-evm", "reth-execution-types", @@ -14273,10 +14276,14 @@ dependencies = [ "reth-node-api", "reth-node-core", "reth-optimism-chainspec", + "reth-optimism-consensus", "reth-optimism-evm", "reth-optimism-flashblocks", + "reth-optimism-forks", "reth-optimism-payload-builder", "reth-optimism-primitives", + "reth-payload-builder", + "reth-payload-builder-primitives", "reth-payload-primitives", "reth-primitives-traits", "reth-provider", @@ -14289,13 +14296,19 @@ dependencies = [ "reth-storage-api", "reth-tasks", "reth-tracing", - "ringbuffer 0.15.0", + "reth-trie", + "reth-trie-common", + "reth-trie-db", + "reth-trie-parallel", + "ringbuffer", "serde", "serde_json", "test-case", + "thiserror 2.0.18", "tokio", "tokio-stream", "tokio-tungstenite 0.26.2", + "tokio-util", "tracing", "url", "xlayer-builder", @@ -14354,6 +14367,7 @@ dependencies = [ "op-alloy-network", "reth", "reth-basic-payload-builder", + "reth-chain-state", "reth-chainspec", "reth-cli-util", "reth-evm", @@ -14365,17 +14379,21 @@ dependencies = [ "reth-optimism-forks", "reth-optimism-node", "reth-optimism-payload-builder", + "reth-optimism-primitives", "reth-optimism-txpool", "reth-payload-builder", "reth-payload-builder-primitives", "reth-payload-primitives", "reth-payload-util", "reth-primitives-traits", + "reth-provider", "reth-revm", "reth-rpc-server-types", "reth-storage-api", + "reth-tasks", "reth-transaction-pool", "revm", + "tokio", "tracing", "url", "xlayer-bridge-intercept", @@ -14431,12 +14449,30 @@ dependencies = [ name = "xlayer-rpc" version = "0.1.0" dependencies = [ + "alloy-consensus", + "alloy-eips", + 
"alloy-primitives", + "alloy-rpc-types-eth", + "alloy-serde", + "futures", "jsonrpsee", + "jsonrpsee-types", + "op-alloy-network", + "op-alloy-rpc-types", + "reth-chain-state", + "reth-optimism-primitives", "reth-optimism-rpc", - "reth-rpc", + "reth-primitives-traits", + "reth-revm", + "reth-rpc-convert", "reth-rpc-eth-api", - "serde", + "reth-rpc-eth-types", + "reth-rpc-server-types", + "reth-storage-api", "tokio", + "tokio-stream", + "tracing", + "xlayer-flashblocks", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index 08b2759d..ef2dfca3 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -91,7 +91,9 @@ reth-cli-commands = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11. reth-cli-util = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.3" } reth-db = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.3", default-features = false } reth-db-api = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.3", default-features = false } +reth-db-models = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.3", default-features = false } reth-engine-primitives = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.3", default-features = false } +reth-engine-tree = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.3" } reth-errors = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.3" } reth-ethereum-forks = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.3", default-features = false } reth-evm = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.3", default-features = false } @@ -130,6 +132,8 @@ reth-tracing-otlp = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11. 
reth-transaction-pool = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.3" } reth-trie = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.3" } reth-trie-common = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.3", default-features = false } +reth-trie-db = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.3" } +reth-trie-parallel = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.3" } # ============================================================================== # Reth Optimism Dependencies (from local optimism/rust op-reth) @@ -167,6 +171,7 @@ alloy-chains = { version = "0.2.30", default-features = false } alloy-contract = { version = "~1.6" } alloy-consensus = { version = "~1.6", default-features = false } alloy-eips = { version = "~1.6", default-features = false } +alloy-eip7928 = { version = "0.3.0", default-features = false } alloy-evm = { version = "~0.27", default-features = false } alloy-genesis = { version = "~1.6", default-features = false } alloy-json-rpc = { version = "~1.6" } @@ -223,6 +228,7 @@ jsonrpsee-core = { version = "0.26.0" } # misc clap = { version = "4.4.3" } +crossbeam-channel = "0.5.13" derive_more = { version = "2", default-features = false, features = ["full"] } dashmap = "6.1" either = { version = "1.15.0", default-features = false } @@ -243,7 +249,7 @@ tracing = { version = "0.1.41" } shellexpand = "3.1" url = "2.5" brotli = "8.0" -ringbuffer = "0.15" +ringbuffer = "=0.16.0" # p2p libp2p = { version = "0.56", features = ["identify", "ping", "noise", "tcp", "autonat", "mdns", "tokio", "cbor", "macros", "yamux", "dns"] } diff --git a/bin/node/Cargo.toml b/bin/node/Cargo.toml index 96912dc7..b7190256 100644 --- a/bin/node/Cargo.toml +++ b/bin/node/Cargo.toml @@ -25,7 +25,9 @@ xlayer-trace-monitor.workspace = true # reth reth.workspace = true +reth-chain-state.workspace = true reth-optimism-node.workspace = true +reth-optimism-primitives.workspace = true reth-optimism-cli.workspace 
= true reth-node-api.workspace = true reth-node-builder.workspace = true @@ -46,7 +48,9 @@ reth-optimism-forks.workspace = true reth-optimism-txpool.workspace = true reth-transaction-pool.workspace = true reth-cli-util.workspace = true +reth-provider.workspace = true reth-rpc-server-types.workspace = true +reth-tasks = { workspace = true, features = ["rayon"] } # alloy alloy-primitives.workspace = true @@ -57,6 +61,9 @@ op-alloy-network.workspace = true # revm revm.workspace = true +# async +tokio.workspace = true + # misc clap.workspace = true tracing.workspace = true diff --git a/bin/node/src/args.rs b/bin/node/src/args.rs index 924e13fa..b5df985a 100644 --- a/bin/node/src/args.rs +++ b/bin/node/src/args.rs @@ -120,6 +120,10 @@ pub struct XLayerArgs { #[command(flatten)] pub builder: BuilderArgs, + /// Flashblocks RPC configuration + #[command(flatten)] + pub flashblocks_rpc: FlashblocksRpcArgs, + /// Enable legacy rpc routing #[command(flatten)] pub legacy: LegacyRpcArgs, @@ -132,22 +136,6 @@ pub struct XLayerArgs { #[command(flatten)] pub intercept: XLayerInterceptArgs, - /// Enable custom flashblocks subscription - #[arg( - long = "xlayer.flashblocks-subscription", - help = "Enable custom flashblocks subscription (disabled by default)", - default_value = "false" - )] - pub enable_flashblocks_subscription: bool, - - /// Set the number of subscribed addresses in flashblocks subscription - #[arg( - long = "xlayer.flashblocks-subscription-max-addresses", - help = "Set the number of subscribed addresses in flashblocks subscription", - default_value = "1000" - )] - pub flashblocks_subscription_max_addresses: usize, - #[arg( long = "xlayer.sequencer-mode", help = "Enable sequencer mode for the node (default: false, i.e., RPC mode). 
This flag can be used by various business logic components to determine node behavior.", @@ -247,6 +235,40 @@ impl LegacyRpcArgs { } } +#[derive(Debug, Clone, Args, PartialEq, Eq, Default)] +pub struct FlashblocksRpcArgs { + /// Enable flashblocks RPC + #[arg( + long = "xlayer.flashblocks-url", + help = "URL of the flashblocks RPC endpoint (disabled by default)" + )] + pub flashblock_url: Option, + + /// Enable custom flashblocks subscription + #[arg( + long = "xlayer.flashblocks-subscription", + help = "Enable custom flashblocks subscription (disabled by default)", + default_value = "false" + )] + pub enable_flashblocks_subscription: bool, + + /// Set the number of subscribed addresses in flashblocks subscription + #[arg( + long = "xlayer.flashblocks-subscription-max-addresses", + help = "Set the number of subscribed addresses in flashblocks subscription", + default_value = "1000" + )] + pub flashblocks_subscription_max_addresses: usize, + + /// Enable flashblocks RPC state comparison debug mode + #[arg( + long = "xlayer.flashblocks-debug-state-comparison", + help = "Enable flashblocks RPC state comparison debug mode", + default_value = "false" + )] + pub flashblocks_debug_state_comparison: bool, +} + #[cfg(test)] mod tests { use super::*; @@ -391,13 +413,19 @@ mod tests { "--xlayer.flashblocks-subscription", "--xlayer.flashblocks-subscription-max-addresses", "2000", + "--xlayer.flashblocks-url", + "ws://localhost:1111", ]) .args; - assert!(args.enable_flashblocks_subscription); assert!(args.legacy.legacy_rpc_url.is_some()); assert_eq!(args.legacy.legacy_rpc_timeout, Duration::from_secs(45)); - assert_eq!(args.flashblocks_subscription_max_addresses, 2000); + assert!(args.flashblocks_rpc.enable_flashblocks_subscription); + assert_eq!(args.flashblocks_rpc.flashblocks_subscription_max_addresses, 2000); + assert_eq!( + args.flashblocks_rpc.flashblock_url, + Some(Url::parse("ws://localhost:1111").unwrap()) + ); assert!(args.validate().is_ok()); } diff --git 
a/bin/node/src/main.rs b/bin/node/src/main.rs index f3c61f82..73cbf77c 100644 --- a/bin/node/src/main.rs +++ b/bin/node/src/main.rs @@ -8,25 +8,31 @@ use payload::XLayerPayloadServiceBuilder; use args::XLayerArgs; use clap::Parser; use either::Either; -use std::sync::Arc; +use std::sync::{Arc, OnceLock}; use tracing::info; -use op_alloy_network::Optimism; use reth::rpc::eth::EthApiTypes; use reth::{ builder::{DebugNodeLauncher, EngineNodeLauncher, Node, NodeHandle}, providers::providers::BlockchainProvider, }; +use reth_chainspec::ChainSpecProvider; use reth_optimism_cli::Cli; +use reth_optimism_evm::OpEvmConfig; use reth_optimism_node::{args::RollupArgs, OpNode}; +use reth_provider::CanonStateSubscriptions; use reth_rpc_server_types::RethRpcModule; use xlayer_chainspec::XLayerChainSpecParser; -use xlayer_flashblocks::handler::FlashblocksService; -use xlayer_flashblocks::subscription::FlashblocksPubSub; +use xlayer_flashblocks::{ + FlashblockStateCache, FlashblocksPersistCtx, FlashblocksPubSub, FlashblocksRpcCtx, + FlashblocksRpcService, WsFlashBlockStream, +}; use xlayer_legacy_rpc::{layer::LegacyRpcRouterLayer, LegacyRpcRouterConfig}; use xlayer_monitor::{start_monitor_handle, RpcMonitorLayer, XLayerMonitor}; -use xlayer_rpc::xlayer_ext::{XlayerRpcExt, XlayerRpcExtApiServer}; +use xlayer_rpc::{ + DefaultRpcExt, DefaultRpcExtApiServer, FlashblocksEthApiExt, FlashblocksEthApiOverrideServer, +}; #[global_allocator] static ALLOC: reth_cli_util::allocator::Allocator = reth_cli_util::allocator::new_allocator(); @@ -120,11 +126,18 @@ fn main() { ); } + // Get the payload events tx for pre-warming the engine tree with locally built + // pending flashblocks sequence. 
+ let events_sender = Arc::new(OnceLock::new()); + let tree_config = builder.config().engine.tree_config(); + // Create the X Layer payload service builder // It handles both flashblocks and default modes internally let payload_builder = XLayerPayloadServiceBuilder::new( args.xlayer_args.builder.clone(), + args.xlayer_args.flashblocks_rpc.flashblock_url.is_some(), args.rollup_args.compute_pending_block, + events_sender.clone(), )? .with_bridge_config(bridge_config); @@ -135,47 +148,74 @@ fn main() { .extend_rpc_modules(move |ctx| { let new_op_eth_api = Arc::new(ctx.registry.eth_api().clone()); - // Initialize flashblocks RPC service if not in flashblocks sequencer mode - if !args.xlayer_args.builder.flashblocks.enabled { - if let Some(flashblock_rx) = new_op_eth_api.subscribe_received_flashblocks() - { - let service = FlashblocksService::new( - ctx.node().clone(), - flashblock_rx, - args.xlayer_args.builder.flashblocks, - args.rollup_args.flashblocks_url.is_some(), + let flashblocks_state = if let Some(flashblock_url) = + args.xlayer_args.flashblocks_rpc.flashblock_url + { + // Initialize flashblocks RPC + let flashblocks_state = FlashblockStateCache::new(ctx.provider().canonical_in_memory_state()); + let canon_state_rx = ctx.provider().canonical_state_stream(); + let service = FlashblocksRpcService::new( + args.xlayer_args.builder.flashblocks, + flashblocks_state.clone(), + ctx.node().task_executor.clone(), + FlashblocksRpcCtx { + provider: ctx.provider().clone(), + canon_state_rx, + evm_config: OpEvmConfig::optimism(ctx.provider().chain_spec()), + chain_spec: ctx.provider().chain_spec(), + tree_config, + debug_state_comparison: args.xlayer_args.flashblocks_rpc.flashblocks_debug_state_comparison, + }, + FlashblocksPersistCtx { datadir, - )?; - service.spawn(); - info!(target: "reth::cli", "xlayer flashblocks service initialized"); + relay_flashblocks: args.rollup_args.flashblocks_url.is_some(), + }, + )?; + if 
!args.xlayer_args.flashblocks_rpc.flashblocks_debug_state_comparison { + service.spawn_prewarm(events_sender); } + service.spawn_persistence()?; + service.spawn_rpc(WsFlashBlockStream::new(flashblock_url)); + info!(target: "reth::cli", "xlayer flashblocks service initialized"); - if xlayer_args.enable_flashblocks_subscription - && let Some(pending_blocks_rx) = new_op_eth_api.pending_block_rx() - { - let eth_pubsub = ctx.registry.eth_handlers().pubsub.clone(); - + // Initialize custom flashblocks subscription + if args.xlayer_args.flashblocks_rpc.enable_flashblocks_subscription { let flashblocks_pubsub = FlashblocksPubSub::new( - eth_pubsub, - pending_blocks_rx, + ctx.registry.eth_handlers().pubsub.clone(), + flashblocks_state.subscribe_pending_sequence(), Box::new(ctx.node().task_executor.clone()), new_op_eth_api.converter().clone(), - xlayer_args.flashblocks_subscription_max_addresses, + args.xlayer_args.flashblocks_rpc.flashblocks_subscription_max_addresses, ); ctx.modules.add_or_replace_if_module_configured( RethRpcModule::Eth, flashblocks_pubsub.into_rpc(), )?; - info!(target: "reth::cli", "xlayer eth pubsub initialized"); + info!(target: "reth::cli", "xlayer flashblocks pubsub initialized"); } - } + + // Register flashblocks Eth API overrides + let flashblocks_eth = FlashblocksEthApiExt::new( + ctx.registry.eth_api().clone(), + flashblocks_state.clone(), + ); + ctx.modules.add_or_replace_if_module_configured( + RethRpcModule::Eth, + FlashblocksEthApiOverrideServer::into_rpc(flashblocks_eth), + )?; + info!(target: "reth::cli", "xlayer flashblocks eth api overrides initialized"); + Some(flashblocks_state) + } else { + None + }; // Register X Layer RPC - let xlayer_rpc = XlayerRpcExt { backend: new_op_eth_api }; - ctx.modules.merge_configured(XlayerRpcExtApiServer::::into_rpc( + let xlayer_rpc = DefaultRpcExt::new(flashblocks_state); + ctx.modules.merge_configured(DefaultRpcExtApiServer::into_rpc( xlayer_rpc, ))?; - info!(target: "reth::cli", "xlayer rpc 
extension enabled"); + info!(target: "reth::cli", "xlayer eth rpc extension enabled"); + info!(message = "X Layer RPC modules initialized"); Ok(()) }) .launch_with_fn(|builder| { diff --git a/bin/node/src/payload.rs b/bin/node/src/payload.rs index be1a9cd3..b117fa33 100644 --- a/bin/node/src/payload.rs +++ b/bin/node/src/payload.rs @@ -1,3 +1,5 @@ +use std::sync::{Arc, OnceLock}; + use reth::builder::components::PayloadServiceBuilder; use reth_node_api::NodeTypes; use reth_node_builder::{components::BasicPayloadServiceBuilder, BuilderContext}; @@ -7,7 +9,7 @@ use reth_optimism_payload_builder::config::{OpDAConfig, OpGasLimitConfig}; use xlayer_bridge_intercept::BridgeInterceptConfig; use xlayer_builder::{ args::BuilderArgs, - flashblocks::{BuilderConfig, FlashblocksServiceBuilder}, + flashblocks::{BuilderConfig, FlashblocksServiceBuilder, PayloadEventsSender}, traits::{NodeBounds, PoolBounds}, }; @@ -28,27 +30,34 @@ pub struct XLayerPayloadServiceBuilder { impl XLayerPayloadServiceBuilder { pub fn new( xlayer_builder_args: BuilderArgs, + flashblock_rpc: bool, compute_pending_block: bool, + events_sender: Arc>, ) -> eyre::Result { Self::with_config( xlayer_builder_args, + flashblock_rpc, compute_pending_block, OpDAConfig::default(), OpGasLimitConfig::default(), + events_sender, ) } pub fn with_config( xlayer_builder_args: BuilderArgs, + flashblock_rpc: bool, compute_pending_block: bool, da_config: OpDAConfig, gas_limit_config: OpGasLimitConfig, + events_sender: Arc>, ) -> eyre::Result { - let builder = if xlayer_builder_args.flashblocks.enabled { + let builder = if xlayer_builder_args.flashblocks.enabled || flashblock_rpc { let builder_config = BuilderConfig::try_from(xlayer_builder_args)?; XLayerPayloadServiceBuilderInner::Flashblocks(Box::new(FlashblocksServiceBuilder { config: builder_config, bridge_intercept: Default::default(), + events_sender, })) } else { let payload_builder = OpPayloadBuilder::new(compute_pending_block) diff --git a/crates/builder/Cargo.toml 
b/crates/builder/Cargo.toml index 3ee2ceba..71fa2432 100644 --- a/crates/builder/Cargo.toml +++ b/crates/builder/Cargo.toml @@ -35,6 +35,7 @@ reth-rpc-engine-api.workspace = true reth-node-core.workspace = true reth-basic-payload-builder.workspace = true reth-payload-builder.workspace = true +reth-payload-builder-primitives.workspace = true reth-execution-types.workspace = true reth-metrics.workspace = true reth-provider.workspace = true diff --git a/crates/builder/src/broadcast/mod.rs b/crates/builder/src/broadcast/mod.rs index 7fbfc226..2423ac0f 100644 --- a/crates/builder/src/broadcast/mod.rs +++ b/crates/builder/src/broadcast/mod.rs @@ -1,11 +1,14 @@ mod behaviour; mod outgoing; +pub(crate) mod payload; pub(crate) mod types; pub(crate) mod wspub; use behaviour::Behaviour; -use libp2p_stream::IncomingStreams; -use wspub::WebSocketPublisher; +pub use libp2p::{Multiaddr, StreamProtocol}; +pub use payload::XLayerFlashblockPayload; +pub use types::Message; +pub use wspub::WebSocketPublisher; use crate::metrics::BuilderMetrics; use eyre::Context; @@ -16,6 +19,7 @@ use libp2p::{ swarm::SwarmEvent, tcp, yamux, PeerId, Swarm, Transport as _, }; +use libp2p_stream::IncomingStreams; use multiaddr::Protocol; use std::{ collections::{HashMap, HashSet}, @@ -26,9 +30,6 @@ use tokio::sync::mpsc; use tokio_util::sync::CancellationToken; use tracing::{debug, warn}; -pub use libp2p::{Multiaddr, StreamProtocol}; -pub(crate) use types::Message; - const DEFAULT_MAX_PEER_COUNT: u32 = 50; const DEFAULT_PEER_RETRY_INTERVAL: Duration = Duration::from_secs(1); @@ -573,7 +574,6 @@ mod test { use super::*; use crate::broadcast::wspub::WebSocketPublisher; use crate::metrics::{tokio::FlashblocksTaskMetrics, BuilderMetrics}; - use op_alloy_rpc_types_engine::OpFlashblockPayload; const TEST_AGENT_VERSION: &str = "test/1.0.0"; @@ -640,7 +640,7 @@ mod test { tokio::spawn(async move { node1.run().await }); tokio::spawn(async move { node2.run().await }); - let message = 
Message::from_flashblock_payload(OpFlashblockPayload::default()); + let message = Message::from_flashblock_payload(XLayerFlashblockPayload::default()); let mut rx = rx1.remove(&types::FLASHBLOCKS_STREAM_PROTOCOL).unwrap(); let recv_message = tokio::time::timeout(Duration::from_secs(10), async { loop { @@ -765,7 +765,7 @@ mod test { handler.insert_peer_and_stream(peer_a, types::FLASHBLOCKS_STREAM_PROTOCOL, stream); assert!(handler.has_peer(&peer_a)); - let msg = Message::from_flashblock_payload(OpFlashblockPayload::default()); + let msg = Message::from_flashblock_payload(XLayerFlashblockPayload::default()); let failed = handler.broadcast_message(msg).await.expect("serialization must not fail"); assert_eq!(failed, vec![peer_a], "peer_a must be returned as a failed peer"); @@ -801,7 +801,7 @@ mod test { let mut handler = outgoing::StreamsHandler::new(); handler.insert_peer_and_stream(peer_a, types::FLASHBLOCKS_STREAM_PROTOCOL, stream); - let msg = Message::from_flashblock_payload(OpFlashblockPayload::default()); + let msg = Message::from_flashblock_payload(XLayerFlashblockPayload::default()); let failed = handler.broadcast_message(msg).await.expect("serialization must not fail"); assert!(failed.is_empty(), "no peers should fail on a healthy stream"); diff --git a/crates/builder/src/broadcast/payload.rs b/crates/builder/src/broadcast/payload.rs new file mode 100644 index 00000000..5a7a3bb8 --- /dev/null +++ b/crates/builder/src/broadcast/payload.rs @@ -0,0 +1,52 @@ +use op_alloy_rpc_types_engine::OpFlashblockPayload; +use serde::{Deserialize, Serialize}; + +#[derive(Clone, Debug, Default, PartialEq, Serialize, Deserialize)] +pub struct XLayerFlashblockPayload { + #[serde(flatten)] + pub inner: OpFlashblockPayload, + /// The target flashblock index that the builder will build until. Default to zero if + /// unset yet, for base flashblock payload. 
+ #[serde(default)] + pub target_index: u64, +} + +impl XLayerFlashblockPayload { + pub fn new(inner: OpFlashblockPayload, target_index: u64) -> Self { + Self { inner, target_index } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_xlayer_payload_serializes_flat() { + let payload = OpFlashblockPayload::default(); + let wrapped = XLayerFlashblockPayload::new(payload.clone(), 7); + let json = serde_json::to_string(&wrapped).unwrap(); + // target_index should appear at top level, not nested + assert!(json.contains("\"target_index\":7")); + // inner fields should also be at top level + assert!(json.contains("\"index\":")); + } + + #[test] + fn test_backwards_compat_old_consumer_ignores_target_index() { + let payload = OpFlashblockPayload::default(); + let wrapped = XLayerFlashblockPayload::new(payload, 7); + let json = serde_json::to_string(&wrapped).unwrap(); + // Old consumer deserializes as OpFlashblockPayload — should succeed + let _: OpFlashblockPayload = serde_json::from_str(&json).unwrap(); + } + + #[test] + fn test_backwards_compat_new_consumer_defaults_target_index() { + let payload = OpFlashblockPayload::default(); + let json = serde_json::to_string(&payload).unwrap(); + // New consumer deserializes as XLayerFlashblockPayload — target_index defaults to 0 + let wrapped: XLayerFlashblockPayload = serde_json::from_str(&json).unwrap(); + assert_eq!(wrapped.target_index, 0); + } +} diff --git a/crates/builder/src/broadcast/types.rs b/crates/builder/src/broadcast/types.rs index d842cce3..671d5dca 100644 --- a/crates/builder/src/broadcast/types.rs +++ b/crates/builder/src/broadcast/types.rs @@ -1,5 +1,5 @@ +use crate::broadcast::XLayerFlashblockPayload; use alloy_primitives::U256; -use op_alloy_rpc_types_engine::OpFlashblockPayload; use serde::{Deserialize, Serialize}; use reth::{core::primitives::SealedBlock, payload::PayloadId}; @@ -11,9 +11,9 @@ pub(crate) const FLASHBLOCKS_STREAM_PROTOCOL: crate::broadcast::StreamProtocol = 
crate::broadcast::StreamProtocol::new("/flashblocks/1.0.0"); #[derive(Clone, Debug, PartialEq, Deserialize, Serialize)] -pub(crate) enum Message { +pub enum Message { OpBuiltPayload(OpBuiltPayload), - OpFlashblockPayload(OpFlashblockPayload), + OpFlashblockPayload(XLayerFlashblockPayload), } impl Message { @@ -25,13 +25,13 @@ impl Message { /// Internal type analogous to [`reth_optimism_payload_builder::OpBuiltPayload`] /// which additionally implements `Serialize` and `Deserialize` for p2p transmission. #[derive(Clone, Debug, PartialEq, Deserialize, Serialize)] -pub(crate) struct OpBuiltPayload { +pub struct OpBuiltPayload { /// Identifier of the payload - pub(crate) id: PayloadId, + pub id: PayloadId, /// Sealed block - pub(crate) block: SealedBlock, + pub block: SealedBlock, /// The fees of the block - pub(crate) fees: U256, + pub fees: U256, } impl Message { @@ -39,7 +39,7 @@ impl Message { Message::OpBuiltPayload(value.into()) } - pub(crate) fn from_flashblock_payload(value: OpFlashblockPayload) -> Self { + pub(crate) fn from_flashblock_payload(value: XLayerFlashblockPayload) -> Self { Message::OpFlashblockPayload(value) } } diff --git a/crates/builder/src/broadcast/wspub.rs b/crates/builder/src/broadcast/wspub.rs index 4524caed..ac8bbd75 100644 --- a/crates/builder/src/broadcast/wspub.rs +++ b/crates/builder/src/broadcast/wspub.rs @@ -1,3 +1,6 @@ +use crate::{ + broadcast::XLayerFlashblockPayload, metrics::tokio::MonitoredTask, metrics::BuilderMetrics, +}; use core::{ fmt::{Debug, Formatter}, net::SocketAddr, @@ -5,7 +8,6 @@ use core::{ }; use futures::SinkExt; use futures_util::StreamExt; -use op_alloy_rpc_types_engine::OpFlashblockPayload; use std::{io, net::TcpListener, sync::Arc}; use tokio::{ net::TcpStream, @@ -24,8 +26,6 @@ use tokio_tungstenite::{ }; use tracing::{debug, info, trace, warn}; -use crate::{metrics::tokio::MonitoredTask, metrics::BuilderMetrics}; - /// A WebSockets publisher that accepts connections from client websockets and broadcasts 
to them /// updates about new flashblocks. It maintains a count of sent messages and active subscriptions. /// @@ -65,7 +65,7 @@ impl WebSocketPublisher { Ok(Self { sent, subs, term, pipe, subscriber_limit }) } - pub fn publish(&self, payload: &OpFlashblockPayload) -> io::Result { + pub fn publish(&self, payload: &XLayerFlashblockPayload) -> io::Result { // Serialize the payload to a UTF-8 string // serialize only once, then just copy around only a pointer // to the serialized data for each subscription. @@ -73,9 +73,10 @@ impl WebSocketPublisher { target: "payload_builder::broadcast", event = "flashblock_sent", message = "Sending flashblock to subscribers", - id = %payload.payload_id, - index = payload.index, - base = payload.base.is_some(), + id = %payload.inner.payload_id, + index = payload.inner.index, + base = payload.inner.base.is_some(), + target_index = payload.target_index, ); let serialized = serde_json::to_string(payload)?; diff --git a/crates/builder/src/flashblocks/builder.rs b/crates/builder/src/flashblocks/builder.rs index afc2ba72..27814290 100644 --- a/crates/builder/src/flashblocks/builder.rs +++ b/crates/builder/src/flashblocks/builder.rs @@ -1,4 +1,5 @@ use crate::{ + broadcast::XLayerFlashblockPayload, flashblocks::{ best_txs::BestFlashblocksTxs, builder_tx::FlashblocksBuilderTx, @@ -193,7 +194,7 @@ pub(super) struct FlashblocksBuilder { pub task_executor: Tasks, /// Sender for sending built flashblock payloads to [`PayloadHandler`], /// which broadcasts outgoing flashblock payloads via p2p. - pub built_fb_payload_tx: mpsc::Sender, + pub built_fb_payload_tx: mpsc::Sender, /// Sender for sending built full block payloads to [`PayloadHandler`], /// which updates the engine tree state. 
pub built_payload_tx: mpsc::Sender, @@ -221,7 +222,7 @@ impl FlashblocksBuilder { task_executor: Tasks, config: BuilderConfig, builder_tx: FlashblocksBuilderTx, - built_fb_payload_tx: mpsc::Sender, + built_fb_payload_tx: mpsc::Sender, built_payload_tx: mpsc::Sender, p2p_cache: FlashblockPayloadsCache, metrics: Arc, @@ -425,8 +426,10 @@ where // not emitting flashblock if no_tx_pool in FCU, it's just syncing // For X Layer - skip if replaying if !ctx.attributes().no_tx_pool && !rebuild_external_payload { + // For X Layer - skip if replaying + let fb_payload_with_count = XLayerFlashblockPayload::new(fb_payload.clone(), 0); self.built_fb_payload_tx - .try_send(fb_payload.clone()) + .try_send(fb_payload_with_count.clone()) .map_err(PayloadBuilderError::other)?; // For X Layer, full link monitoring support @@ -759,8 +762,12 @@ where fb_payload.index = flashblock_index; fb_payload.base = None; + let fb_payload_with_count = XLayerFlashblockPayload::new( + fb_payload.clone(), + fb_state.target_flashblock_count(), + ); self.built_fb_payload_tx - .try_send(fb_payload) + .try_send(fb_payload_with_count) .wrap_err("failed to send built payload to handler")?; *best_payload = (new_payload, bundle_state); diff --git a/crates/builder/src/flashblocks/handler.rs b/crates/builder/src/flashblocks/handler.rs index 353a5aa3..dcadbc6e 100644 --- a/crates/builder/src/flashblocks/handler.rs +++ b/crates/builder/src/flashblocks/handler.rs @@ -1,5 +1,5 @@ use crate::{ - broadcast::{wspub::WebSocketPublisher, Message}, + broadcast::{Message, WebSocketPublisher, XLayerFlashblockPayload}, flashblocks::{ handler_ctx::FlashblockHandlerContext, utils::{cache::FlashblockPayloadsCache, execution::ExecutionInfo}, @@ -40,7 +40,7 @@ pub(crate) struct FlashblocksPayloadHandler { // handler context for external flashblock execution ctx: FlashblockHandlerContext, // receives new flashblock payloads built by this builder. 
- built_fb_payload_rx: mpsc::Receiver, + built_fb_payload_rx: mpsc::Receiver, // receives new full block payloads built by this builder. built_payload_rx: mpsc::Receiver, // receives incoming p2p messages from peers. @@ -70,7 +70,7 @@ where #[allow(clippy::too_many_arguments)] pub(crate) fn new( ctx: FlashblockHandlerContext, - built_fb_payload_rx: mpsc::Receiver, + built_fb_payload_rx: mpsc::Receiver, built_payload_rx: mpsc::Receiver, p2p_rx: mpsc::Receiver, p2p_tx: mpsc::Sender, @@ -185,7 +185,7 @@ where })); } Message::OpFlashblockPayload(fb_payload) => { - if let Err(e) = p2p_cache.add_flashblock_payload(fb_payload.clone()) { + if let Err(e) = p2p_cache.add_flashblock_payload(fb_payload.inner.clone()) { warn!(target: "payload_builder", e = ?e, "failed to add flashblock txs to cache"); } if let Err(e) = ws_pub.publish(&fb_payload) { diff --git a/crates/builder/src/flashblocks/mod.rs b/crates/builder/src/flashblocks/mod.rs index e090472a..2ed31142 100644 --- a/crates/builder/src/flashblocks/mod.rs +++ b/crates/builder/src/flashblocks/mod.rs @@ -19,9 +19,8 @@ mod service; mod timing; pub(crate) mod utils; -pub use crate::broadcast::wspub::WebSocketPublisher; pub use context::FlashblocksBuilderCtx; -pub use service::FlashblocksServiceBuilder; +pub use service::{FlashblocksServiceBuilder, PayloadEventsSender}; pub use utils::cache::FlashblockPayloadsCache; /// Configuration values that are specific to the flashblocks builder. 
diff --git a/crates/builder/src/flashblocks/service.rs b/crates/builder/src/flashblocks/service.rs index 3db2401d..5a59748f 100644 --- a/crates/builder/src/flashblocks/service.rs +++ b/crates/builder/src/flashblocks/service.rs @@ -13,18 +13,26 @@ use crate::{ traits::{NodeBounds, PoolBounds}, }; use eyre::WrapErr as _; -use std::{sync::Arc, time::Duration}; +use std::{ + sync::{Arc, OnceLock}, + time::Duration, +}; use reth_basic_payload_builder::BasicPayloadJobGeneratorConfig; use reth_node_api::NodeTypes; use reth_node_builder::{components::PayloadServiceBuilder, BuilderContext}; use reth_optimism_evm::OpEvmConfig; +use reth_optimism_node::OpEngineTypes; use reth_payload_builder::{PayloadBuilderHandle, PayloadBuilderService}; +use reth_payload_builder_primitives::Events; use reth_provider::CanonStateSubscriptions; +pub type PayloadEventsSender = tokio::sync::broadcast::Sender>; + pub struct FlashblocksServiceBuilder { pub config: BuilderConfig, pub bridge_intercept: xlayer_bridge_intercept::BridgeInterceptConfig, + pub events_sender: Arc>, } impl FlashblocksServiceBuilder { @@ -152,6 +160,8 @@ impl FlashblocksServiceBuilder { let (payload_service, payload_builder_handle) = PayloadBuilderService::new(payload_generator, ctx.provider().canonical_state_stream()); + let _ = self.events_sender.set(payload_service.payload_events_handle()); + let handler_ctx = FlashblockHandlerContext::new( &ctx.provider().clone(), self.config.clone(), diff --git a/crates/builder/src/lib.rs b/crates/builder/src/lib.rs index 7b5c6816..64def70c 100644 --- a/crates/builder/src/lib.rs +++ b/crates/builder/src/lib.rs @@ -1,5 +1,5 @@ pub mod args; -pub(crate) mod broadcast; +pub mod broadcast; pub mod flashblocks; pub mod metrics; pub(crate) mod signer; diff --git a/crates/builder/src/tests/framework/instance.rs b/crates/builder/src/tests/framework/instance.rs index 6fddcf92..68244de0 100644 --- a/crates/builder/src/tests/framework/instance.rs +++ 
b/crates/builder/src/tests/framework/instance.rs @@ -41,7 +41,7 @@ use reth_optimism_rpc::OpEthApiBuilder; use reth_optimism_txpool::OpPooledTransaction; use reth_transaction_pool::{AllTransactionsEvents, TransactionPool}; use std::{ - sync::{Arc, LazyLock}, + sync::{Arc, LazyLock, OnceLock}, time::Instant, }; use tokio::{sync::oneshot, task::JoinHandle}; @@ -117,6 +117,7 @@ impl LocalInstance { FlashblocksServiceBuilder { config: builder_config, bridge_intercept: Default::default(), + events_sender: Arc::new(OnceLock::new()), }, )) .with_add_ons(addons) diff --git a/crates/flashblocks/Cargo.toml b/crates/flashblocks/Cargo.toml index 0b326de5..5a75a59f 100644 --- a/crates/flashblocks/Cargo.toml +++ b/crates/flashblocks/Cargo.toml @@ -15,31 +15,45 @@ xlayer-builder.workspace = true # reth reth-chain-state = { workspace = true, features = ["serde"] } -reth-engine-primitives = { workspace = true, features = ["std"] } +reth-chainspec.workspace = true +reth-db-models.workspace = true +reth-engine-primitives.workspace = true +reth-engine-tree.workspace = true reth-errors.workspace = true reth-evm.workspace = true reth-execution-types = { workspace = true, features = ["serde"] } reth-metrics.workspace = true reth-node-api.workspace = true reth-node-core.workspace = true +reth-optimism-consensus.workspace = true +reth-optimism-flashblocks.workspace = true +reth-optimism-forks.workspace = true reth-optimism-payload-builder.workspace = true reth-optimism-primitives = { workspace = true, features = ["serde"] } +reth-payload-builder.workspace = true +reth-payload-builder-primitives.workspace = true reth-payload-primitives.workspace = true reth-primitives-traits = { workspace = true, features = ["serde"] } +reth-provider.workspace = true reth-revm.workspace = true reth-rpc.workspace = true reth-rpc-convert.workspace = true reth-rpc-eth-api.workspace = true reth-rpc-eth-types.workspace = true reth-rpc-server-types.workspace = true -reth-optimism-flashblocks.workspace = true 
reth-storage-api.workspace = true reth-tasks = { workspace = true, features = ["rayon"] } reth-tracing.workspace = true +reth-trie.workspace = true +reth-trie-common.workspace = true +reth-trie-db.workspace = true +reth-trie-parallel.workspace = true # alloy alloy-consensus.workspace = true alloy-eips = { workspace = true, features = ["serde"] } +alloy-eip7928.workspace = true +alloy-evm.workspace = true alloy-json-rpc.workspace = true alloy-primitives = { workspace = true, features = ["serde"] } alloy-rpc-types-engine = { workspace = true, features = ["serde"] } @@ -56,6 +70,7 @@ futures-util.workspace = true tokio.workspace = true tokio-stream.workspace = true tokio-tungstenite.workspace = true +tokio-util.workspace = true # rpc jsonrpsee.workspace = true @@ -63,8 +78,12 @@ async-trait.workspace = true # misc brotli = { workspace = true, features = ["std"] } +crossbeam-channel.workspace = true derive_more.workspace = true +either.workspace = true eyre.workspace = true +parking_lot.workspace = true +thiserror.workspace = true tracing.workspace = true metrics.workspace = true moka.workspace = true diff --git a/crates/flashblocks/src/cache.rs b/crates/flashblocks/src/cache.rs deleted file mode 100644 index 93b5ad90..00000000 --- a/crates/flashblocks/src/cache.rs +++ /dev/null @@ -1,2165 +0,0 @@ -//! Sequence cache management for flashblocks. -//! -//! The `SequenceManager` maintains a ring buffer of recently completed flashblock sequences -//! and intelligently selects which sequence to build based on the local chain tip. 
- -use crate::{ - pending_state::PendingBlockState, - sequence::{FlashBlockPendingSequence, SequenceExecutionOutcome}, - validation::{ - CanonicalBlockFingerprint, CanonicalBlockReconciler, ReconciliationStrategy, ReorgDetector, - TrackedBlockFingerprint, - }, - worker::BuildArgs, - FlashBlock, FlashBlockCompleteSequence, PendingFlashBlock, -}; -use alloy_eips::eip2718::WithEncoded; -use alloy_primitives::B256; -use alloy_rpc_types_engine::PayloadId; -use reth_primitives_traits::{ - transaction::TxHashRef, NodePrimitives, Recovered, SignedTransaction, -}; -use reth_revm::cached::CachedReads; -use ringbuffer::{AllocRingBuffer, RingBuffer}; -use std::collections::{BTreeMap, HashSet}; -use tokio::sync::broadcast; -use tracing::*; - -/// Maximum number of cached sequences in the ring buffer. -const CACHE_SIZE: usize = 3; -/// 200 ms flashblock time. -pub(crate) const FLASHBLOCK_BLOCK_TIME: u64 = 200; - -/// Stable identity for a tracked flashblock sequence. -#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] -pub(crate) struct SequenceId { - pub(crate) block_number: u64, - pub(crate) payload_id: PayloadId, - pub(crate) parent_hash: B256, -} - -impl SequenceId { - fn from_pending(sequence: &FlashBlockPendingSequence) -> Option { - let base = sequence.payload_base()?; - let payload_id = sequence.payload_id()?; - Some(Self { block_number: base.block_number, payload_id, parent_hash: base.parent_hash }) - } - - fn from_complete(sequence: &FlashBlockCompleteSequence) -> Self { - Self { - block_number: sequence.block_number(), - payload_id: sequence.payload_id(), - parent_hash: sequence.payload_base().parent_hash, - } - } -} - -/// Snapshot selector for build-completion matching. -#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] -enum SequenceSnapshot { - Pending { revision: u64 }, - Cached, -} - -/// Opaque ticket that identifies the exact sequence snapshot selected for a build. 
-#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] -pub(crate) struct BuildTicket { - sequence_id: SequenceId, - snapshot: SequenceSnapshot, -} - -impl BuildTicket { - const fn pending(sequence_id: SequenceId, revision: u64) -> Self { - Self { sequence_id, snapshot: SequenceSnapshot::Pending { revision } } - } - - const fn cached(sequence_id: SequenceId) -> Self { - Self { sequence_id, snapshot: SequenceSnapshot::Cached } - } -} - -/// Result of attempting to apply a build completion to tracked sequence state. -#[derive(Debug, Clone, Copy, PartialEq, Eq)] -pub(crate) enum BuildApplyOutcome { - SkippedNoBuildResult, - AppliedPending, - AppliedCached { - rebroadcasted: bool, - }, - RejectedPendingSequenceMismatch { - ticket_sequence_id: SequenceId, - current_sequence_id: Option, - }, - RejectedPendingRevisionStale { - sequence_id: SequenceId, - ticket_revision: u64, - current_revision: u64, - }, - RejectedCachedSequenceMissing { - sequence_id: SequenceId, - }, -} - -impl BuildApplyOutcome { - pub(crate) const fn is_applied(self) -> bool { - matches!(self, Self::AppliedPending | Self::AppliedCached { .. }) - } -} - -/// A buildable sequence plus the stable identity that selected it. -pub(crate) struct BuildCandidate { - pub(crate) ticket: BuildTicket, - pub(crate) args: BuildArgs, -} - -impl std::ops::Deref for BuildCandidate { - type Target = BuildArgs; - - fn deref(&self) -> &Self::Target { - &self.args - } -} - -/// In-progress pending sequence state. -/// -/// Keeps accepted flashblocks and recovered transactions in lockstep by index. 
-#[derive(Debug)] -struct PendingSequence { - sequence: FlashBlockPendingSequence, - recovered_transactions_by_index: BTreeMap>>>, - revision: u64, - applied_revision: Option, -} - -impl PendingSequence { - fn new() -> Self { - Self { - sequence: FlashBlockPendingSequence::new(), - recovered_transactions_by_index: BTreeMap::new(), - revision: 0, - applied_revision: None, - } - } - - const fn sequence(&self) -> &FlashBlockPendingSequence { - &self.sequence - } - - fn count(&self) -> usize { - self.sequence.count() - } - - const fn revision(&self) -> u64 { - self.revision - } - - fn clear(&mut self) { - self.sequence = FlashBlockPendingSequence::new(); - self.recovered_transactions_by_index.clear(); - self.applied_revision = None; - } - - const fn bump_revision(&mut self) { - self.revision = self.revision.wrapping_add(1); - } - - fn is_revision_applied(&self, revision: u64) -> bool { - self.applied_revision == Some(revision) - } - - const fn mark_revision_applied(&mut self, revision: u64) { - self.applied_revision = Some(revision); - } - - fn insert_flashblock(&mut self, flashblock: FlashBlock) -> eyre::Result<()> { - if !self.sequence.can_accept(&flashblock) { - self.sequence.insert(flashblock); - return Ok(()); - } - - // Only recover transactions once we've validated that this flashblock is accepted. - let recovered_txs = flashblock.recover_transactions().collect::, _>>()?; - let flashblock_index = flashblock.index; - - // Index 0 starts a fresh pending block, so clear any stale in-progress data. 
- if flashblock_index == 0 { - self.clear(); - } - - self.sequence.insert(flashblock); - self.recovered_transactions_by_index.insert(flashblock_index, recovered_txs); - self.bump_revision(); - Ok(()) - } - - fn finalize( - &mut self, - ) -> eyre::Result<(FlashBlockCompleteSequence, Vec>>)> { - let finalized = self.sequence.finalize(); - let recovered_by_index = std::mem::take(&mut self.recovered_transactions_by_index); - - match finalized { - Ok(completed) => Ok((completed, recovered_by_index.into_values().flatten().collect())), - Err(err) => Err(err), - } - } - - fn transactions(&self) -> Vec>> { - self.recovered_transactions_by_index.values().flatten().cloned().collect() - } - - fn tx_hashes(&self) -> Vec { - self.recovered_transactions_by_index.values().flatten().map(|tx| *tx.tx_hash()).collect() - } - - #[cfg(test)] - fn transaction_count(&self) -> usize { - self.recovered_transactions_by_index.values().map(Vec::len).sum() - } -} - -/// Manages flashblock sequences with caching support. -/// -/// This struct handles: -/// - Tracking the current pending sequence -/// - Caching completed sequences in a fixed-size ring buffer -/// - Finding the best sequence to build based on local chain tip -/// - Broadcasting completed sequences to subscribers -#[derive(Debug)] -pub(crate) struct SequenceManager { - /// Current pending sequence being built up from incoming flashblocks - pending: PendingSequence, - /// Ring buffer of recently completed sequences bundled with their decoded transactions (FIFO, - /// size 3) - completed_cache: AllocRingBuffer<(FlashBlockCompleteSequence, Vec>>)>, - /// Cached sequence identities that already had a build completion applied. - applied_cached_sequences: HashSet, - /// Cached minimum block number currently present in `completed_cache`. 
- cached_min_block_number: Option, - /// Broadcast channel for completed sequences - block_broadcaster: broadcast::Sender, - /// Whether to compute state roots when building blocks - compute_state_root: bool, -} - -impl SequenceManager { - /// Creates a new sequence manager. - pub(crate) fn new(compute_state_root: bool) -> Self { - let (block_broadcaster, _) = broadcast::channel(128); - Self { - pending: PendingSequence::new(), - completed_cache: AllocRingBuffer::new(CACHE_SIZE), - applied_cached_sequences: HashSet::new(), - cached_min_block_number: None, - block_broadcaster, - compute_state_root, - } - } - - /// Returns the sender half of the flashblock sequence broadcast channel. - pub(crate) const fn block_sequence_broadcaster( - &self, - ) -> &broadcast::Sender { - &self.block_broadcaster - } - - /// Gets a subscriber to the flashblock sequences produced. - pub(crate) fn subscribe_block_sequence(&self) -> crate::FlashBlockCompleteSequenceRx { - self.block_broadcaster.subscribe() - } - - /// Inserts a new flashblock into the pending sequence. - /// - /// When a flashblock with index 0 arrives (indicating a new block), the current - /// pending sequence is finalized, cached, and broadcast immediately. If the sequence - /// is later built on top of local tip, `on_build_complete()` will broadcast again - /// with computed `state_root`. - /// - /// Transactions are recovered once and cached for reuse during block building. 
- pub(crate) fn insert_flashblock(&mut self, flashblock: FlashBlock) -> eyre::Result<()> { - // If this starts a new block, finalize and cache the previous sequence BEFORE inserting - if flashblock.index == 0 && self.pending.count() > 0 { - let (completed, txs) = self.pending.finalize()?; - let block_number = completed.block_number(); - let parent_hash = completed.payload_base().parent_hash; - - trace!( - target: "flashblocks", - block_number, - %parent_hash, - cache_size = self.completed_cache.len(), - "Caching completed flashblock sequence" - ); - - // Broadcast immediately to consensus client (even without state_root) - // This ensures sequences are forwarded during catch-up even if not buildable on tip. - // ConsensusClient checks execution_outcome and skips newPayload if state_root is zero. - if self.block_broadcaster.receiver_count() > 0 { - let _ = self.block_broadcaster.send(completed.clone()); - } - - // Bundle completed sequence with its decoded transactions and push to cache - // Ring buffer automatically evicts oldest entry when full - self.push_completed_sequence(completed, txs); - } - - self.pending.insert_flashblock(flashblock)?; - Ok(()) - } - - /// Pushes a completed sequence into the cache and maintains cached min block-number metadata. - fn push_completed_sequence( - &mut self, - completed: FlashBlockCompleteSequence, - txs: Vec>>, - ) { - let block_number = completed.block_number(); - let completed_sequence_id = SequenceId::from_complete(&completed); - let evicted_block_number = if self.completed_cache.is_full() { - self.completed_cache.front().map(|(seq, _)| seq.block_number()) - } else { - None - }; - let evicted_sequence_id = if self.completed_cache.is_full() { - self.completed_cache.front().map(|(seq, _)| SequenceId::from_complete(seq)) - } else { - None - }; - - if let Some(sequence_id) = evicted_sequence_id { - self.applied_cached_sequences.remove(&sequence_id); - } - // Re-tracking a sequence identity should always start as unapplied. 
- self.applied_cached_sequences.remove(&completed_sequence_id); - - self.completed_cache.enqueue((completed, txs)); - - self.cached_min_block_number = match self.cached_min_block_number { - None => Some(block_number), - Some(current_min) if block_number < current_min => Some(block_number), - Some(current_min) if Some(current_min) == evicted_block_number => { - self.recompute_cache_min_block_number() - } - Some(current_min) => Some(current_min), - }; - } - - /// Recomputes the minimum block number in `completed_cache`. - fn recompute_cache_min_block_number(&self) -> Option { - self.completed_cache.iter().map(|(seq, _)| seq.block_number()).min() - } - - /// Returns the newest cached sequence that matches `parent_hash` and still needs execution. - /// - /// Cached sequences that already had build completion applied are skipped to avoid redundant - /// rebuild loops. - fn newest_unexecuted_cached_for_parent( - &self, - parent_hash: B256, - ) -> Option<&(FlashBlockCompleteSequence, Vec>>)> { - self.completed_cache.iter().rev().find(|(seq, _)| { - let sequence_id = SequenceId::from_complete(seq); - seq.payload_base().parent_hash == parent_hash - && !self.applied_cached_sequences.contains(&sequence_id) - }) - } - - /// Returns a mutable cached sequence entry by exact sequence identity. - fn cached_entry_mut_by_id( - &mut self, - sequence_id: SequenceId, - ) -> Option<&mut (FlashBlockCompleteSequence, Vec>>)> { - self.completed_cache - .iter_mut() - .find(|(seq, _)| SequenceId::from_complete(seq) == sequence_id) - } - - /// Returns the current pending sequence for inspection. - pub(crate) const fn pending(&self) -> &FlashBlockPendingSequence { - self.pending.sequence() - } - - /// Finds the next sequence to build and returns the selected sequence identity - /// with ready-to-use `BuildArgs`. - /// - /// Priority order: - /// 1. Current pending sequence (if parent matches local tip) - /// 2. Cached sequence with exact parent match - /// 3. 
Speculative: pending sequence with pending parent state (if provided) - /// - /// Returns None if nothing is buildable right now. - pub(crate) fn next_buildable_args>( - &mut self, - local_tip_hash: B256, - local_tip_timestamp: u64, - pending_parent_state: Option>, - ) -> Option>>, N>> { - // Try to find a buildable sequence: (ticket, base, last_fb, transactions, - // cached_state, source_name, pending_parent) - let (ticket, base, last_flashblock, transactions, cached_state, source_name, pending_parent) = - // Priority 1: Try current pending sequence (canonical mode) - if let Some(base) = self.pending.sequence.payload_base().filter(|b| b.parent_hash == local_tip_hash) { - let revision = self.pending.revision(); - if self.pending.is_revision_applied(revision) { - trace!( - target: "flashblocks", - block_number = base.block_number, - revision, - parent_hash = ?base.parent_hash, - "Skipping rebuild for already-applied pending revision" - ); - return None; - } - let sequence_id = SequenceId::from_pending(self.pending.sequence())?; - let ticket = BuildTicket::pending(sequence_id, revision); - let cached_state = self.pending.sequence.take_cached_reads().map(|r| (base.parent_hash, r)); - let last_fb = self.pending.sequence.last_flashblock()?; - let transactions = self.pending.transactions(); - (ticket, base, last_fb, transactions, cached_state, "pending", None) - } - // Priority 2: Try cached sequence with exact parent match (canonical mode) - else if let Some((cached, txs)) = self.newest_unexecuted_cached_for_parent(local_tip_hash) { - let sequence_id = SequenceId::from_complete(cached); - let ticket = BuildTicket::cached(sequence_id); - let base = cached.payload_base().clone(); - let last_fb = cached.last(); - let transactions = txs.clone(); - let cached_state = None; - (ticket, base, last_fb, transactions, cached_state, "cached", None) - } - // Priority 3: Try speculative building with pending parent state - else if let Some(ref pending_state) = pending_parent_state { 
- // Check if pending sequence's parent matches the pending state's block - if let Some(base) = self.pending.sequence.payload_base().filter(|b| b.parent_hash == pending_state.block_hash) { - let revision = self.pending.revision(); - if self.pending.is_revision_applied(revision) { - trace!( - target: "flashblocks", - block_number = base.block_number, - revision, - speculative_parent = ?pending_state.block_hash, - "Skipping speculative rebuild for already-applied pending revision" - ); - return None; - } - let sequence_id = SequenceId::from_pending(self.pending.sequence())?; - let ticket = BuildTicket::pending(sequence_id, revision); - let cached_state = self.pending.sequence.take_cached_reads().map(|r| (base.parent_hash, r)); - let last_fb = self.pending.sequence.last_flashblock()?; - let transactions = self.pending.transactions(); - ( - ticket, - base, - last_fb, - transactions, - cached_state, - "speculative-pending", - pending_parent_state, - ) - } - // Check cached sequences - else if let Some((cached, txs)) = self.newest_unexecuted_cached_for_parent(pending_state.block_hash) { - let sequence_id = SequenceId::from_complete(cached); - let ticket = BuildTicket::cached(sequence_id); - let base = cached.payload_base().clone(); - let last_fb = cached.last(); - let transactions = txs.clone(); - let cached_state = None; - ( - ticket, - base, - last_fb, - transactions, - cached_state, - "speculative-cached", - pending_parent_state, - ) - } else { - return None; - } - } else { - return None; - }; - - // Auto-detect when to compute state root: only if the builder didn't provide it (sent - // B256::ZERO) and we're near the expected final flashblock index. - // - // Background: Each block period receives multiple flashblocks at regular intervals. - // The sequencer sends an initial "base" flashblock at index 0 when a new block starts, - // then subsequent flashblocks are produced every FLASHBLOCK_BLOCK_TIME intervals (200ms). 
- // - // Examples with different block times: - // - Base (2s blocks): expect 2000ms / 200ms = 10 intervals → Flashblocks: index 0 (base) - // + indices 1-10 = potentially 11 total - // - // - Unichain (1s blocks): expect 1000ms / 200ms = 5 intervals → Flashblocks: index 0 (base) - // + indices 1-5 = potentially 6 total - // - // Why compute at N-1 instead of N: - // 1. Timing variance in flashblock producing time may mean only N flashblocks were produced - // instead of N+1 (missing the final one). Computing at N-1 ensures we get the state root - // for most common cases. - // - // 2. The +1 case (index 0 base + N intervals): If all N+1 flashblocks do arrive, we'll - // still calculate state root for flashblock N, which sacrifices a little performance but - // still ensures correctness for common cases. - // - // Note: Pathological cases may result in fewer flashblocks than expected (e.g., builder - // downtime, flashblock execution exceeding timing budget). When this occurs, we won't - // compute the state root, causing FlashblockConsensusClient to lack precomputed state for - // engine_newPayload. This is safe: we still have op-node as backstop to maintain - // chain progression. 
- let block_time_ms = base.timestamp.saturating_sub(local_tip_timestamp) * 1000; - let expected_final_flashblock = block_time_ms / FLASHBLOCK_BLOCK_TIME; - let compute_state_root = self.compute_state_root - && last_flashblock.diff.state_root.is_zero() - && last_flashblock.index >= expected_final_flashblock.saturating_sub(1); - - trace!( - target: "flashblocks", - block_number = base.block_number, - source = source_name, - ticket = ?ticket, - flashblock_index = last_flashblock.index, - expected_final_flashblock, - compute_state_root_enabled = self.compute_state_root, - state_root_is_zero = last_flashblock.diff.state_root.is_zero(), - will_compute_state_root = compute_state_root, - is_speculative = pending_parent.is_some(), - "Building from flashblock sequence" - ); - - Some(BuildCandidate { - ticket, - args: BuildArgs { - base, - transactions, - cached_state, - last_flashblock_index: last_flashblock.index, - last_flashblock_hash: last_flashblock.diff.block_hash, - compute_state_root, - pending_parent, - }, - }) - } - - /// Records the result of building a sequence and re-broadcasts with execution outcome. - /// - /// Updates execution outcome and cached reads. For cached sequences (already broadcast - /// once during finalize), this broadcasts again with the computed `state_root`, allowing - /// the consensus client to submit via `engine_newPayload`. 
- pub(crate) fn on_build_complete( - &mut self, - ticket: BuildTicket, - result: Option<(PendingFlashBlock, CachedReads)>, - ) -> BuildApplyOutcome { - let Some((computed_block, cached_reads)) = result else { - return BuildApplyOutcome::SkippedNoBuildResult; - }; - - // Extract execution outcome - let execution_outcome = computed_block.computed_state_root().map(|state_root| { - SequenceExecutionOutcome { block_hash: computed_block.block().hash(), state_root } - }); - - let outcome = self.apply_build_outcome(ticket, execution_outcome, cached_reads); - match outcome { - BuildApplyOutcome::SkippedNoBuildResult | BuildApplyOutcome::AppliedPending => {} - BuildApplyOutcome::AppliedCached { rebroadcasted } => { - trace!( - target: "flashblocks", - ticket = ?ticket, - rebroadcasted, - "Applied cached build completion" - ); - } - BuildApplyOutcome::RejectedPendingSequenceMismatch { - ticket_sequence_id, - current_sequence_id, - } => { - trace!( - target: "flashblocks", - ticket = ?ticket, - ?ticket_sequence_id, - ?current_sequence_id, - "Rejected build completion: pending sequence mismatch" - ); - } - BuildApplyOutcome::RejectedPendingRevisionStale { - sequence_id, - ticket_revision, - current_revision, - } => { - trace!( - target: "flashblocks", - ticket = ?ticket, - ?sequence_id, - ticket_revision, - current_revision, - "Rejected build completion: pending revision stale" - ); - } - BuildApplyOutcome::RejectedCachedSequenceMissing { sequence_id } => { - trace!( - target: "flashblocks", - ticket = ?ticket, - ?sequence_id, - "Rejected build completion: cached sequence missing" - ); - } - } - outcome - } - - /// Applies build output to the exact sequence targeted by the build job. - /// - /// Returns the apply outcome with explicit rejection reasons for observability. 
- fn apply_build_outcome( - &mut self, - ticket: BuildTicket, - execution_outcome: Option, - cached_reads: CachedReads, - ) -> BuildApplyOutcome { - match ticket.snapshot { - SequenceSnapshot::Pending { revision } => { - let current_sequence_id = SequenceId::from_pending(self.pending.sequence()); - if current_sequence_id != Some(ticket.sequence_id) { - return BuildApplyOutcome::RejectedPendingSequenceMismatch { - ticket_sequence_id: ticket.sequence_id, - current_sequence_id, - }; - } - - let current_revision = self.pending.revision(); - if current_revision != revision { - return BuildApplyOutcome::RejectedPendingRevisionStale { - sequence_id: ticket.sequence_id, - ticket_revision: revision, - current_revision, - }; - } - - { - self.pending.sequence.set_execution_outcome(execution_outcome); - self.pending.sequence.set_cached_reads(cached_reads); - self.pending.mark_revision_applied(current_revision); - trace!( - target: "flashblocks", - block_number = self.pending.sequence.block_number(), - ticket = ?ticket, - has_computed_state_root = execution_outcome.is_some(), - "Updated pending sequence with build results" - ); - } - BuildApplyOutcome::AppliedPending - } - SequenceSnapshot::Cached => { - if let Some((cached, _)) = self.cached_entry_mut_by_id(ticket.sequence_id) { - let (needs_rebroadcast, rebroadcast_sequence) = { - // Only re-broadcast if we computed new information (state_root was - // missing). If sequencer already provided - // state_root, we already broadcast in - // insert_flashblock, so skip re-broadcast to avoid duplicate FCU calls. 
- let needs_rebroadcast = - execution_outcome.is_some() && cached.execution_outcome().is_none(); - - cached.set_execution_outcome(execution_outcome); - - let rebroadcast_sequence = needs_rebroadcast.then_some(cached.clone()); - (needs_rebroadcast, rebroadcast_sequence) - }; - self.applied_cached_sequences.insert(ticket.sequence_id); - - if let Some(sequence) = rebroadcast_sequence - && self.block_broadcaster.receiver_count() > 0 - { - trace!( - target: "flashblocks", - block_number = sequence.block_number(), - ticket = ?ticket, - "Re-broadcasting sequence with computed state_root" - ); - let _ = self.block_broadcaster.send(sequence); - } - BuildApplyOutcome::AppliedCached { rebroadcasted: needs_rebroadcast } - } else { - BuildApplyOutcome::RejectedCachedSequenceMissing { - sequence_id: ticket.sequence_id, - } - } - } - } - } - - /// Returns the earliest block number in the pending or cached sequences. - pub(crate) fn earliest_block_number(&self) -> Option { - match (self.pending.sequence.block_number(), self.cached_min_block_number) { - (Some(pending_block), Some(cache_min)) => Some(cache_min.min(pending_block)), - (Some(pending_block), None) => Some(pending_block), - (None, Some(cache_min)) => Some(cache_min), - (None, None) => None, - } - } - - /// Returns the latest block number in the pending or cached sequences. - pub(crate) fn latest_block_number(&self) -> Option { - // Pending is always the latest if it exists - if let Some(pending_block) = self.pending.sequence.block_number() { - return Some(pending_block); - } - - // Fall back to cache - self.completed_cache.iter().map(|(seq, _)| seq.block_number()).max() - } - - /// Returns the tracked block fingerprint for the given block number from pending or cached - /// sequences, if available. 
- fn tracked_fingerprint_for_block(&self, block_number: u64) -> Option { - // Check pending sequence - if self.pending.sequence.block_number() == Some(block_number) { - let base = self.pending.sequence.payload_base()?; - let last_flashblock = self.pending.sequence.last_flashblock()?; - let tx_hashes = self.pending.tx_hashes(); - return Some(TrackedBlockFingerprint { - block_number, - block_hash: last_flashblock.diff.block_hash, - parent_hash: base.parent_hash, - tx_hashes, - }); - } - - // Check cached sequences (newest first). Multiple payload variants for the same block - // number can coexist in cache; reorg checks must use the newest tracked variant. - for (seq, txs) in self.completed_cache.iter().rev() { - if seq.block_number() == block_number { - let tx_hashes = txs.iter().map(|tx| *tx.tx_hash()).collect(); - return Some(TrackedBlockFingerprint { - block_number, - block_hash: seq.last().diff.block_hash, - parent_hash: seq.payload_base().parent_hash, - tx_hashes, - }); - } - } - - None - } - - /// Processes a canonical block and reconciles pending state. - /// - /// This method determines how to handle the pending flashblock state when a new - /// canonical block arrives. It uses the [`CanonicalBlockReconciler`] to decide - /// the appropriate strategy based on: - /// - Whether canonical has caught up to pending - /// - Whether a reorg was detected (transaction mismatch) - /// - Whether pending is too far ahead of canonical - /// - /// Returns the reconciliation strategy that was applied. - pub(crate) fn process_canonical_block( - &mut self, - canonical: CanonicalBlockFingerprint, - max_depth: u64, - ) -> ReconciliationStrategy { - let canonical_block_number = canonical.block_number; - let earliest = self.earliest_block_number(); - let latest = self.latest_block_number(); - - // Only run reorg detection if we actually track the canonical block number. 
- let reorg_detected = self - .tracked_fingerprint_for_block(canonical_block_number) - .map(|tracked| ReorgDetector::detect(&tracked, &canonical).is_reorg()) - .unwrap_or(false); - - // Determine reconciliation strategy - let strategy = CanonicalBlockReconciler::reconcile( - earliest, - latest, - canonical_block_number, - max_depth, - reorg_detected, - ); - - match &strategy { - ReconciliationStrategy::CatchUp => { - trace!( - target: "flashblocks", - ?latest, - canonical_block_number, - "Canonical caught up - clearing pending state" - ); - self.clear_all(); - } - ReconciliationStrategy::HandleReorg => { - warn!( - target: "flashblocks", - canonical_block_number, - canonical_tx_count = canonical.tx_hashes.len(), - canonical_parent_hash = ?canonical.parent_hash, - canonical_block_hash = ?canonical.block_hash, - "Reorg detected - clearing pending state" - ); - self.clear_all(); - } - ReconciliationStrategy::DepthLimitExceeded { depth, max_depth } => { - trace!( - target: "flashblocks", - depth, - max_depth, - "Depth limit exceeded - clearing pending state" - ); - self.clear_all(); - } - ReconciliationStrategy::Continue => { - trace!( - target: "flashblocks", - ?earliest, - ?latest, - canonical_block_number, - "Canonical behind pending - continuing" - ); - } - ReconciliationStrategy::NoPendingState => { - trace!( - target: "flashblocks", - canonical_block_number, - "No pending state to reconcile" - ); - } - } - - strategy - } - - /// Clears all pending and cached state. 
- fn clear_all(&mut self) { - self.pending.clear(); - self.completed_cache.clear(); - self.applied_cached_sequences.clear(); - self.cached_min_block_number = None; - } - - #[cfg(test)] - fn pending_transaction_count(&self) -> usize { - self.pending.transaction_count() - } -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::{ - test_utils::TestFlashBlockFactory, - validation::{CanonicalBlockFingerprint, ReconciliationStrategy}, - }; - use alloy_primitives::B256; - use alloy_rpc_types_engine::PayloadId; - use op_alloy_consensus::OpTxEnvelope; - use reth_optimism_primitives::OpPrimitives; - - fn canonical_for( - manager: &SequenceManager, - block_number: u64, - tx_hashes: Vec, - ) -> CanonicalBlockFingerprint { - if let Some(tracked) = manager.tracked_fingerprint_for_block(block_number) { - CanonicalBlockFingerprint { - block_number, - block_hash: tracked.block_hash, - parent_hash: tracked.parent_hash, - tx_hashes, - } - } else { - CanonicalBlockFingerprint { - block_number, - block_hash: B256::repeat_byte(0xFE), - parent_hash: B256::repeat_byte(0xFD), - tx_hashes, - } - } - } - - #[test] - fn test_sequence_manager_new() { - let manager: SequenceManager = SequenceManager::new(true); - assert_eq!(manager.pending().count(), 0); - } - - #[test] - fn test_insert_flashblock_creates_pending_sequence() { - let mut manager: SequenceManager = SequenceManager::new(true); - let factory = TestFlashBlockFactory::new(); - - let fb0 = factory.flashblock_at(0).build(); - manager.insert_flashblock(fb0).unwrap(); - - assert_eq!(manager.pending().count(), 1); - assert_eq!(manager.pending().block_number(), Some(100)); - } - - #[test] - fn test_insert_flashblock_caches_completed_sequence() { - let mut manager: SequenceManager = SequenceManager::new(true); - let factory = TestFlashBlockFactory::new(); - - // Build first sequence - let fb0 = factory.flashblock_at(0).build(); - manager.insert_flashblock(fb0.clone()).unwrap(); - - let fb1 = factory.flashblock_after(&fb0).build(); - 
manager.insert_flashblock(fb1).unwrap(); - - // Insert new base (index 0) which should finalize and cache previous sequence - let fb2 = factory.flashblock_for_next_block(&fb0).build(); - manager.insert_flashblock(fb2).unwrap(); - - // New sequence should be pending - assert_eq!(manager.pending().count(), 1); - assert_eq!(manager.pending().block_number(), Some(101)); - assert_eq!(manager.completed_cache.len(), 1); - let (cached_sequence, _txs) = manager.completed_cache.get(0).unwrap(); - assert_eq!(cached_sequence.block_number(), 100); - } - - #[test] - fn test_next_buildable_args_returns_none_when_empty() { - let mut manager: SequenceManager = SequenceManager::new(true); - let local_tip_hash = B256::random(); - let local_tip_timestamp = 1000; - - let args = - manager.next_buildable_args::(local_tip_hash, local_tip_timestamp, None); - assert!(args.is_none()); - } - - #[test] - fn test_next_buildable_args_matches_pending_parent() { - let mut manager: SequenceManager = SequenceManager::new(true); - let factory = TestFlashBlockFactory::new(); - - let fb0 = factory.flashblock_at(0).build(); - let parent_hash = fb0.base.as_ref().unwrap().parent_hash; - manager.insert_flashblock(fb0).unwrap(); - - let args = manager.next_buildable_args::(parent_hash, 1000000, None); - assert!(args.is_some()); - - let build_args = args.unwrap(); - assert_eq!(build_args.last_flashblock_index, 0); - } - - #[test] - fn test_next_buildable_args_returns_none_when_parent_mismatch() { - let mut manager: SequenceManager = SequenceManager::new(true); - let factory = TestFlashBlockFactory::new(); - - let fb0 = factory.flashblock_at(0).build(); - manager.insert_flashblock(fb0).unwrap(); - - // Use different parent hash - let wrong_parent = B256::random(); - let args = manager.next_buildable_args::(wrong_parent, 1000000, None); - assert!(args.is_none()); - } - - #[test] - fn test_next_buildable_args_prefers_pending_over_cached() { - let mut manager: SequenceManager = SequenceManager::new(true); - let 
factory = TestFlashBlockFactory::new(); - - // Create and finalize first sequence - let fb0 = factory.flashblock_at(0).build(); - manager.insert_flashblock(fb0.clone()).unwrap(); - - // Create new sequence (finalizes previous) - let fb1 = factory.flashblock_for_next_block(&fb0).build(); - let parent_hash = fb1.base.as_ref().unwrap().parent_hash; - manager.insert_flashblock(fb1).unwrap(); - - // Request with first sequence's parent (should find cached) - let args = manager.next_buildable_args::(parent_hash, 1000000, None); - assert!(args.is_some()); - } - - #[test] - fn test_next_buildable_args_finds_cached_sequence() { - let mut manager: SequenceManager = SequenceManager::new(true); - let factory = TestFlashBlockFactory::new(); - - // Build and cache first sequence - let fb0 = factory.flashblock_at(0).build(); - let parent_hash = fb0.base.as_ref().unwrap().parent_hash; - manager.insert_flashblock(fb0.clone()).unwrap(); - - // Start new sequence to finalize first - let fb1 = factory.flashblock_for_next_block(&fb0).build(); - manager.insert_flashblock(fb1.clone()).unwrap(); - - // Clear pending by starting another sequence - let fb2 = factory.flashblock_for_next_block(&fb1).build(); - manager.insert_flashblock(fb2).unwrap(); - - // Request first sequence's parent - should find in cache - let args = manager.next_buildable_args::(parent_hash, 1000000, None); - assert!(args.is_some()); - } - - #[test] - fn test_next_buildable_args_uses_newest_cached_when_parent_hash_shared() { - let mut manager: SequenceManager = SequenceManager::new(true); - let factory = TestFlashBlockFactory::new(); - - let shared_parent = B256::repeat_byte(0x44); - let payload_a = PayloadId::new([0xAA; 8]); - let payload_b = PayloadId::new([0xBB; 8]); - - // Sequence A for block 100 (will become cached first). 
- let fb_a0 = factory - .flashblock_at(0) - .block_number(100) - .parent_hash(shared_parent) - .payload_id(payload_a) - .build(); - manager.insert_flashblock(fb_a0).unwrap(); - - // Sequence B for the same parent hash and block number (different payload id). - // Inserting index 0 finalizes/caches sequence A. - let fb_b0 = factory - .flashblock_at(0) - .block_number(100) - .parent_hash(shared_parent) - .payload_id(payload_b) - .build(); - manager.insert_flashblock(fb_b0.clone()).unwrap(); - - // Finalize/cache sequence B. - let fb_next = factory.flashblock_for_next_block(&fb_b0).build(); - manager.insert_flashblock(fb_next).unwrap(); - - let candidate = manager - .next_buildable_args::(shared_parent, 1_000_000, None) - .expect("shared parent should resolve to a cached sequence"); - - // Newest sequence (B) should be selected deterministically. - assert_eq!(candidate.ticket.sequence_id.payload_id, payload_b); - assert_eq!(candidate.last_flashblock_hash, fb_b0.diff.block_hash); - } - - #[test] - fn test_next_buildable_args_skips_executed_cached_and_advances_speculative() { - use crate::pending_state::PendingBlockState; - use reth_execution_types::BlockExecutionOutput; - use reth_revm::cached::CachedReads; - use std::sync::Arc; - - let mut manager: SequenceManager = SequenceManager::new(true); - let factory = TestFlashBlockFactory::new(); - - // Block 100 with three flashblocks. - let fb100_0 = factory.flashblock_at(0).build(); - let local_tip_hash = fb100_0.base.as_ref().unwrap().parent_hash; - manager.insert_flashblock(fb100_0.clone()).unwrap(); - let fb100_1 = factory.flashblock_after(&fb100_0).build(); - manager.insert_flashblock(fb100_1.clone()).unwrap(); - let fb100_2 = factory.flashblock_after(&fb100_1).build(); - manager.insert_flashblock(fb100_2.clone()).unwrap(); - - // First flashblock of block 101 finalizes block 100 into cache. 
- let fb101_0 = factory.flashblock_for_next_block(&fb100_2).build(); - manager.insert_flashblock(fb101_0.clone()).unwrap(); - - // First build picks canonical-attached cached block 100. - let first = manager - .next_buildable_args::(local_tip_hash, 1_000_000, None) - .expect("cached block should be buildable first"); - assert!(matches!(first.ticket.snapshot, SequenceSnapshot::Cached)); - assert_eq!(first.base.block_number, fb100_0.block_number()); - - // Mark cached block 100 as executed. - let applied = manager.apply_build_outcome( - first.ticket, - Some(SequenceExecutionOutcome { - block_hash: B256::repeat_byte(0x33), - state_root: B256::repeat_byte(0x44), - }), - CachedReads::default(), - ); - assert!(matches!( - applied, - BuildApplyOutcome::AppliedCached { rebroadcasted: true | false } - )); - - // Speculative state for block 100 should unlock block 101/index0. - let pending_state = PendingBlockState:: { - block_hash: fb101_0.base.as_ref().unwrap().parent_hash, - block_number: fb100_0.block_number(), - parent_hash: local_tip_hash, - canonical_anchor_hash: local_tip_hash, - execution_outcome: Arc::new(BlockExecutionOutput::default()), - cached_reads: CachedReads::default(), - sealed_header: None, - }; - - let second = manager - .next_buildable_args(local_tip_hash, 1_000_000, Some(pending_state)) - .expect("speculative pending block should be buildable next"); - assert!(matches!(second.ticket.snapshot, SequenceSnapshot::Pending { .. })); - assert_eq!(second.base.block_number, fb101_0.block_number()); - assert!(second.pending_parent.is_some()); - } - - #[test] - fn test_cached_sequence_with_provided_state_root_not_reselected_after_apply() { - use reth_revm::cached::CachedReads; - - let mut manager: SequenceManager = SequenceManager::new(true); - let factory = TestFlashBlockFactory::new(); - let provided_root = B256::repeat_byte(0xA5); - - // Block 100 sequence has non-zero state root from sequencer. 
- let fb100_0 = factory.flashblock_at(0).state_root(provided_root).build(); - let local_tip_hash = fb100_0.base.as_ref().unwrap().parent_hash; - manager.insert_flashblock(fb100_0.clone()).unwrap(); - - let fb100_1 = factory.flashblock_after(&fb100_0).state_root(provided_root).build(); - manager.insert_flashblock(fb100_1.clone()).unwrap(); - - let fb100_2 = factory.flashblock_after(&fb100_1).state_root(provided_root).build(); - manager.insert_flashblock(fb100_2.clone()).unwrap(); - - // First flashblock of block 101 finalizes block 100 into cache. - let fb101_0 = factory.flashblock_for_next_block(&fb100_2).build(); - manager.insert_flashblock(fb101_0).unwrap(); - - let candidate = manager - .next_buildable_args::(local_tip_hash, 1_000_000, None) - .expect("cached sequence should be buildable once"); - assert!(matches!(candidate.ticket.snapshot, SequenceSnapshot::Cached)); - assert!( - !candidate.compute_state_root, - "non-zero sequencer root should skip local root compute" - ); - - let applied = manager.apply_build_outcome(candidate.ticket, None, CachedReads::default()); - assert!(matches!(applied, BuildApplyOutcome::AppliedCached { rebroadcasted: false })); - - let repeated = manager.next_buildable_args::(local_tip_hash, 1_000_000, None); - assert!( - repeated.is_none(), - "cached sequence with provided state root must not be reselected after apply" - ); - } - - #[test] - fn test_delayed_canonical_allows_speculative_next_block_index_zero() { - use crate::pending_state::PendingBlockState; - use reth_execution_types::BlockExecutionOutput; - use reth_revm::cached::CachedReads; - use std::sync::Arc; - - let mut manager: SequenceManager = SequenceManager::new(true); - let factory = TestFlashBlockFactory::new(); - - // Canonical tip is block 9. Flashblocks for block 10 all build on block 9. 
- let canonical_9_hash = B256::repeat_byte(0x09); - let fb10_0 = factory - .flashblock_at(0) - .block_number(10) - .parent_hash(canonical_9_hash) - .block_hash(B256::repeat_byte(0x10)) - .build(); - manager.insert_flashblock(fb10_0.clone()).unwrap(); - - let fb10_1 = factory.flashblock_after(&fb10_0).block_hash(B256::repeat_byte(0x11)).build(); - manager.insert_flashblock(fb10_1.clone()).unwrap(); - - let fb10_2 = factory.flashblock_after(&fb10_1).block_hash(B256::repeat_byte(0x12)).build(); - manager.insert_flashblock(fb10_2.clone()).unwrap(); - - // First flashblock for block 11 arrives before canonical block 10. - let fb11_0 = - factory.flashblock_for_next_block(&fb10_2).block_hash(B256::repeat_byte(0x20)).build(); - manager.insert_flashblock(fb11_0.clone()).unwrap(); - - // Build block 10 first from canonical tip (cached canonical-attached sequence). - let block10_candidate = manager - .next_buildable_args::(canonical_9_hash, 1_000_000, None) - .expect("block 10 should be buildable from canonical tip"); - assert_eq!(block10_candidate.base.block_number, 10); - assert!(matches!(block10_candidate.ticket.snapshot, SequenceSnapshot::Cached)); - - let applied = manager.apply_build_outcome( - block10_candidate.ticket, - Some(SequenceExecutionOutcome { - block_hash: fb11_0.base.as_ref().unwrap().parent_hash, - state_root: B256::repeat_byte(0xAA), - }), - CachedReads::default(), - ); - assert!(matches!( - applied, - BuildApplyOutcome::AppliedCached { rebroadcasted: true | false } - )); - - // Speculative state produced by block 10 should unlock block 11/index 0 - // even though canonical block 10 has not arrived yet. 
- let pending_state_10 = PendingBlockState:: { - block_hash: fb11_0.base.as_ref().unwrap().parent_hash, - block_number: 10, - parent_hash: canonical_9_hash, - canonical_anchor_hash: canonical_9_hash, - execution_outcome: Arc::new(BlockExecutionOutput::default()), - cached_reads: CachedReads::default(), - sealed_header: None, - }; - - let before_canonical_10 = manager - .next_buildable_args(canonical_9_hash, 1_000_000, Some(pending_state_10.clone())) - .expect("block 11/index0 should be buildable speculatively before canonical block 10"); - assert_eq!(before_canonical_10.base.block_number, 11); - assert!(before_canonical_10.pending_parent.is_some()); - assert_eq!( - before_canonical_10.pending_parent.as_ref().unwrap().canonical_anchor_hash, - canonical_9_hash - ); - - // Canonical block 10 arrives later: strategy must be Continue (do not clear pending state). - let strategy = manager.process_canonical_block(canonical_for(&manager, 10, vec![]), 64); - assert_eq!(strategy, ReconciliationStrategy::Continue); - - // Block 11/index0 must remain buildable after delayed canonical block 10. 
- let after_canonical_10 = manager - .next_buildable_args(canonical_9_hash, 1_000_000, Some(pending_state_10)) - .expect("block 11/index0 should remain buildable after delayed canonical block 10"); - assert_eq!(after_canonical_10.base.block_number, 11); - assert!(after_canonical_10.pending_parent.is_some()); - } - - #[test] - fn test_cached_entry_lookup_is_exact_by_sequence_id() { - let mut manager: SequenceManager = SequenceManager::new(true); - let factory = TestFlashBlockFactory::new(); - - let shared_parent = B256::repeat_byte(0x55); - let payload_a = PayloadId::new([0x0A; 8]); - let payload_b = PayloadId::new([0x0B; 8]); - - let fb_a0 = factory - .flashblock_at(0) - .block_number(100) - .parent_hash(shared_parent) - .payload_id(payload_a) - .build(); - manager.insert_flashblock(fb_a0).unwrap(); - - let fb_b0 = factory - .flashblock_at(0) - .block_number(100) - .parent_hash(shared_parent) - .payload_id(payload_b) - .build(); - manager.insert_flashblock(fb_b0.clone()).unwrap(); - - // Finalize/cache sequence B. 
- let fb_next = factory.flashblock_for_next_block(&fb_b0).build(); - manager.insert_flashblock(fb_next).unwrap(); - - let seq_a_id = - SequenceId { block_number: 100, payload_id: payload_a, parent_hash: shared_parent }; - let seq_b_id = - SequenceId { block_number: 100, payload_id: payload_b, parent_hash: shared_parent }; - - let (seq_a, _) = manager - .cached_entry_mut_by_id(seq_a_id) - .expect("sequence A should be found by exact id"); - assert_eq!(seq_a.payload_id(), payload_a); - - let (seq_b, _) = manager - .cached_entry_mut_by_id(seq_b_id) - .expect("sequence B should be found by exact id"); - assert_eq!(seq_b.payload_id(), payload_b); - } - - #[test] - fn test_reorg_detection_uses_newest_cached_variant_for_block_number() { - let mut manager: SequenceManager = SequenceManager::new(true); - let factory = TestFlashBlockFactory::new(); - - let shared_parent = B256::repeat_byte(0x66); - let payload_a = PayloadId::new([0x1A; 8]); - let payload_b = PayloadId::new([0x1B; 8]); - - // Sequence A for block 100 (cached first). - let fb_a0 = factory - .flashblock_at(0) - .block_number(100) - .parent_hash(shared_parent) - .payload_id(payload_a) - .block_hash(B256::repeat_byte(0xA1)) - .build(); - manager.insert_flashblock(fb_a0).unwrap(); - - // Sequence B for the same block number/parent (cached second = newest). - let fb_b0 = factory - .flashblock_at(0) - .block_number(100) - .parent_hash(shared_parent) - .payload_id(payload_b) - .block_hash(B256::repeat_byte(0xB1)) - .build(); - manager.insert_flashblock(fb_b0.clone()).unwrap(); - - // Finalize/cache B and start pending block 101. 
- let fb_next = factory.flashblock_for_next_block(&fb_b0).build(); - manager.insert_flashblock(fb_next).unwrap(); - - let tracked = manager - .tracked_fingerprint_for_block(100) - .expect("tracked fingerprint for block 100 should exist"); - assert_eq!( - tracked.block_hash, fb_b0.diff.block_hash, - "reorg detection must use newest cached variant for a shared block number" - ); - - // Canonical matches newest variant B; this must not be treated as reorg. - let canonical = CanonicalBlockFingerprint { - block_number: 100, - block_hash: fb_b0.diff.block_hash, - parent_hash: shared_parent, - tx_hashes: tracked.tx_hashes, - }; - - let strategy = manager.process_canonical_block(canonical, 64); - assert_eq!(strategy, ReconciliationStrategy::Continue); - assert_eq!(manager.pending().block_number(), Some(101)); - assert!(!manager.completed_cache.is_empty()); - } - - #[test] - fn test_on_build_complete_ignores_unknown_sequence_id() { - let mut manager: SequenceManager = SequenceManager::new(true); - let factory = TestFlashBlockFactory::new(); - - // Build one cached sequence and one pending sequence. 
- let fb0 = factory.flashblock_at(0).build(); - manager.insert_flashblock(fb0.clone()).unwrap(); - let fb1 = factory.flashblock_for_next_block(&fb0).build(); - manager.insert_flashblock(fb1.clone()).unwrap(); - - assert_eq!(manager.completed_cache.len(), 1); - assert!(manager.completed_cache.get(0).unwrap().0.execution_outcome().is_none()); - - let pending_parent = manager.pending().payload_base().unwrap().parent_hash; - let before = manager - .next_buildable_args::(pending_parent, 1_000_000, None) - .expect("pending sequence should be buildable"); - assert!(before.cached_state.is_none(), "pending sequence must start without cached reads"); - - let cached = &manager.completed_cache.get(0).unwrap().0; - let stale_payload = if cached.payload_id() == PayloadId::new([0xEE; 8]) { - PayloadId::new([0xEF; 8]) - } else { - PayloadId::new([0xEE; 8]) - }; - let stale_id = SequenceId { - block_number: cached.block_number(), - payload_id: stale_payload, - parent_hash: cached.payload_base().parent_hash, - }; - let stale_ticket = BuildTicket::cached(stale_id); - - let applied = manager.apply_build_outcome( - stale_ticket, - Some(SequenceExecutionOutcome { - block_hash: B256::repeat_byte(0x11), - state_root: B256::repeat_byte(0x22), - }), - reth_revm::cached::CachedReads::default(), - ); - assert!(matches!(applied, BuildApplyOutcome::RejectedCachedSequenceMissing { .. })); - - // Unknown sequence IDs must never mutate tracked pending/cached state. - let after = manager - .next_buildable_args::(pending_parent, 1_000_000, None) - .expect("pending sequence should remain buildable"); - assert!(after.cached_state.is_none(), "stale completion must not attach cached reads"); - - // Finalize current pending sequence and ensure no synthetic execution outcome was injected. 
- let pending_block_number = manager.pending().block_number().unwrap(); - let fb2 = factory.flashblock_for_next_block(&fb1).build(); - manager.insert_flashblock(fb2).unwrap(); - let finalized_pending = manager - .completed_cache - .iter() - .find(|(seq, _)| seq.block_number() == pending_block_number) - .expect("pending sequence should be finalized into cache") - .0 - .clone(); - assert!(finalized_pending.execution_outcome().is_none()); - - assert!(manager.completed_cache.get(0).unwrap().0.execution_outcome().is_none()); - } - - #[test] - fn test_pending_build_ticket_rejects_stale_revision() { - let mut manager: SequenceManager = SequenceManager::new(true); - let factory = TestFlashBlockFactory::new(); - - let fb0 = factory.flashblock_at(0).build(); - let parent_hash = fb0.base.as_ref().unwrap().parent_hash; - manager.insert_flashblock(fb0.clone()).unwrap(); - - let first_candidate = manager - .next_buildable_args::(parent_hash, 1_000_000, None) - .expect("initial pending sequence should be buildable"); - let stale_ticket = first_candidate.ticket; - - // Pending sequence advances while the old build would be in-flight. - let fb1 = factory.flashblock_after(&fb0).build(); - manager.insert_flashblock(fb1.clone()).unwrap(); - - let stale_applied = manager.apply_build_outcome( - stale_ticket, - Some(SequenceExecutionOutcome { - block_hash: B256::repeat_byte(0x31), - state_root: B256::repeat_byte(0x32), - }), - reth_revm::cached::CachedReads::default(), - ); - assert!( - matches!(stale_applied, BuildApplyOutcome::RejectedPendingRevisionStale { .. }), - "stale pending ticket must be rejected" - ); - - // Fresh ticket for the current revision should still apply. 
- let fresh_candidate = manager - .next_buildable_args::(parent_hash, 1_000_000, None) - .expect("advanced pending sequence should remain buildable"); - assert_eq!(fresh_candidate.last_flashblock_hash, fb1.diff.block_hash); - assert!(fresh_candidate.cached_state.is_none()); - - let fresh_applied = manager.apply_build_outcome( - fresh_candidate.ticket, - Some(SequenceExecutionOutcome { - block_hash: B256::repeat_byte(0x41), - state_root: B256::repeat_byte(0x42), - }), - reth_revm::cached::CachedReads::default(), - ); - assert!(matches!(fresh_applied, BuildApplyOutcome::AppliedPending)); - - let with_same_revision = - manager.next_buildable_args::(parent_hash, 1_000_000, None); - assert!( - with_same_revision.is_none(), - "applied pending revision must not be rebuilt until sequence revision advances" - ); - - // Once pending data advances, the next revision should be buildable and use cached reads. - let fb2 = factory.flashblock_after(&fb1).build(); - manager.insert_flashblock(fb2.clone()).unwrap(); - - let with_cached_state = manager - .next_buildable_args::(parent_hash, 1_000_000, None) - .expect("pending sequence should be buildable after revision advances"); - assert_eq!(with_cached_state.last_flashblock_hash, fb2.diff.block_hash); - assert!( - with_cached_state.cached_state.is_some(), - "fresh completion should attach cached reads once pending revision advances" - ); - } - - #[test] - fn test_compute_state_root_logic_near_expected_final() { - let mut manager: SequenceManager = SequenceManager::new(true); - let block_time = 2u64; - let factory = TestFlashBlockFactory::new().with_block_time(block_time); - - // Create sequence with zero state root (needs computation) - let fb0 = factory.flashblock_at(0).state_root(B256::ZERO).build(); - let parent_hash = fb0.base.as_ref().unwrap().parent_hash; - let base_timestamp = fb0.base.as_ref().unwrap().timestamp; - manager.insert_flashblock(fb0.clone()).unwrap(); - - // Add flashblocks up to expected final index (2000ms / 
200ms = 10) - for i in 1..=9 { - let fb = factory.flashblock_after(&fb0).index(i).state_root(B256::ZERO).build(); - manager.insert_flashblock(fb).unwrap(); - } - - // Request with proper timing - should compute state root for index 9 - let args = manager.next_buildable_args::( - parent_hash, - base_timestamp - block_time, - None, - ); - assert!(args.is_some()); - assert!(args.unwrap().compute_state_root); - } - - #[test] - fn test_no_compute_state_root_when_provided_by_sequencer() { - let mut manager: SequenceManager = SequenceManager::new(true); - let block_time = 2u64; - let factory = TestFlashBlockFactory::new().with_block_time(block_time); - - // Create sequence with non-zero state root (provided by sequencer) - let fb0 = factory.flashblock_at(0).state_root(B256::random()).build(); - let parent_hash = fb0.base.as_ref().unwrap().parent_hash; - let base_timestamp = fb0.base.as_ref().unwrap().timestamp; - manager.insert_flashblock(fb0).unwrap(); - - let args = manager.next_buildable_args::( - parent_hash, - base_timestamp - block_time, - None, - ); - assert!(args.is_some()); - assert!(!args.unwrap().compute_state_root); - } - - #[test] - fn test_no_compute_state_root_when_disabled() { - let mut manager: SequenceManager = SequenceManager::new(false); - let block_time = 2u64; - let factory = TestFlashBlockFactory::new().with_block_time(block_time); - - // Create sequence with zero state root (needs computation) - let fb0 = factory.flashblock_at(0).state_root(B256::ZERO).build(); - let parent_hash = fb0.base.as_ref().unwrap().parent_hash; - let base_timestamp = fb0.base.as_ref().unwrap().timestamp; - manager.insert_flashblock(fb0.clone()).unwrap(); - - // Add flashblocks up to expected final index (2000ms / 200ms = 10) - for i in 1..=9 { - let fb = factory.flashblock_after(&fb0).index(i).state_root(B256::ZERO).build(); - manager.insert_flashblock(fb).unwrap(); - } - - // Request with proper timing - should compute state root for index 9 - let args = 
manager.next_buildable_args::( - parent_hash, - base_timestamp - block_time, - None, - ); - assert!(args.is_some()); - assert!(!args.unwrap().compute_state_root); - } - - #[test] - fn test_compute_state_root_with_timestamp_skew_does_not_underflow() { - let mut manager: SequenceManager = SequenceManager::new(true); - let factory = TestFlashBlockFactory::new(); - - let fb0 = factory.flashblock_at(0).state_root(B256::ZERO).build(); - let parent_hash = fb0.base.as_ref().unwrap().parent_hash; - let base_timestamp = fb0.base.as_ref().unwrap().timestamp; - manager.insert_flashblock(fb0).unwrap(); - - // Local tip timestamp can be ahead briefly in skewed/out-of-order conditions. - // This should not panic due to arithmetic underflow. - let args = - manager.next_buildable_args::(parent_hash, base_timestamp + 1, None); - assert!(args.is_some()); - } - - #[test] - fn test_cache_ring_buffer_evicts_oldest() { - let mut manager: SequenceManager = SequenceManager::new(true); - let factory = TestFlashBlockFactory::new(); - - // Fill cache with 4 sequences (cache size is 3, so oldest should be evicted) - let mut last_fb = factory.flashblock_at(0).build(); - manager.insert_flashblock(last_fb.clone()).unwrap(); - - for _ in 0..3 { - last_fb = factory.flashblock_for_next_block(&last_fb).build(); - manager.insert_flashblock(last_fb.clone()).unwrap(); - } - - // The first sequence should have been evicted, so we can't build it - let first_parent = factory.flashblock_at(0).build().base.unwrap().parent_hash; - let args = manager.next_buildable_args::(first_parent, 1000000, None); - // Should not find it (evicted from ring buffer) - assert!(args.is_none()); - } - - // ==================== Canonical Block Reconciliation Tests ==================== - - #[test] - fn test_process_canonical_block_no_pending_state() { - let mut manager: SequenceManager = SequenceManager::new(true); - - // No pending state, should return NoPendingState - let canonical = canonical_for(&manager, 100, vec![]); - let 
strategy = manager.process_canonical_block(canonical, 10); - assert_eq!(strategy, ReconciliationStrategy::NoPendingState); - } - - #[test] - fn test_process_canonical_block_catchup() { - let mut manager: SequenceManager = SequenceManager::new(true); - let factory = TestFlashBlockFactory::new(); - - // Insert a flashblock sequence for block 100 - let fb0 = factory.flashblock_at(0).build(); - manager.insert_flashblock(fb0).unwrap(); - - assert_eq!(manager.pending().block_number(), Some(100)); - - // Canonical catches up to block 100 - let canonical = canonical_for(&manager, 100, vec![]); - let strategy = manager.process_canonical_block(canonical, 10); - assert_eq!(strategy, ReconciliationStrategy::CatchUp); - - // Pending state should be cleared - assert!(manager.pending().block_number().is_none()); - } - - #[test] - fn test_process_canonical_block_continue() { - let mut manager: SequenceManager = SequenceManager::new(true); - let factory = TestFlashBlockFactory::new(); - - // Insert flashblocks for block 100-102 - let fb0 = factory.flashblock_at(0).build(); - manager.insert_flashblock(fb0.clone()).unwrap(); - - let fb1 = factory.flashblock_for_next_block(&fb0).build(); - manager.insert_flashblock(fb1.clone()).unwrap(); - - let fb2 = factory.flashblock_for_next_block(&fb1).build(); - manager.insert_flashblock(fb2).unwrap(); - - // Canonical at 99 (behind pending) - let canonical = canonical_for(&manager, 99, vec![]); - let strategy = manager.process_canonical_block(canonical, 10); - assert_eq!(strategy, ReconciliationStrategy::Continue); - - // Pending state should still exist - assert!(manager.pending().block_number().is_some()); - } - - #[test] - fn test_process_canonical_block_depth_limit_exceeded() { - let mut manager: SequenceManager = SequenceManager::new(true); - let factory = TestFlashBlockFactory::new(); - - // Insert flashblocks for block 100-102 - let fb0 = factory.flashblock_at(0).build(); - manager.insert_flashblock(fb0.clone()).unwrap(); - - let fb1 = 
factory.flashblock_for_next_block(&fb0).build(); - manager.insert_flashblock(fb1.clone()).unwrap(); - - let fb2 = factory.flashblock_for_next_block(&fb1).build(); - manager.insert_flashblock(fb2).unwrap(); - - // At this point: earliest=100, latest=102 - // Canonical at 105 with max_depth of 2 (depth = 105 - 100 = 5, which exceeds 2) - // But wait - if canonical >= latest, it's CatchUp. So canonical must be < latest (102). - // Let's use canonical=101, which is < 102 but depth = 101 - 100 = 1 > 0 - let canonical = canonical_for(&manager, 101, vec![]); - let strategy = manager.process_canonical_block(canonical, 0); - assert!(matches!(strategy, ReconciliationStrategy::DepthLimitExceeded { .. })); - - // Pending state should be cleared - assert!(manager.pending().block_number().is_none()); - } - - #[test] - fn test_earliest_and_latest_block_numbers() { - let mut manager: SequenceManager = SequenceManager::new(true); - let factory = TestFlashBlockFactory::new(); - - // Initially no blocks - assert!(manager.earliest_block_number().is_none()); - assert!(manager.latest_block_number().is_none()); - - // Insert first flashblock (block 100) - let fb0 = factory.flashblock_at(0).build(); - manager.insert_flashblock(fb0.clone()).unwrap(); - - assert_eq!(manager.earliest_block_number(), Some(100)); - assert_eq!(manager.latest_block_number(), Some(100)); - - // Insert next block (block 101) - this caches block 100 - let fb1 = factory.flashblock_for_next_block(&fb0).build(); - manager.insert_flashblock(fb1.clone()).unwrap(); - - assert_eq!(manager.earliest_block_number(), Some(100)); - assert_eq!(manager.latest_block_number(), Some(101)); - - // Insert another block (block 102) - let fb2 = factory.flashblock_for_next_block(&fb1).build(); - manager.insert_flashblock(fb2).unwrap(); - - assert_eq!(manager.earliest_block_number(), Some(100)); - assert_eq!(manager.latest_block_number(), Some(102)); - } - - #[test] - fn test_earliest_block_number_tracks_cache_rollover() { - let mut 
manager: SequenceManager = SequenceManager::new(true); - let factory = TestFlashBlockFactory::new(); - - let fb0 = factory.flashblock_at(0).build(); - manager.insert_flashblock(fb0.clone()).unwrap(); - - let fb1 = factory.flashblock_for_next_block(&fb0).build(); - manager.insert_flashblock(fb1.clone()).unwrap(); - - let fb2 = factory.flashblock_for_next_block(&fb1).build(); - manager.insert_flashblock(fb2.clone()).unwrap(); - - let fb3 = factory.flashblock_for_next_block(&fb2).build(); - manager.insert_flashblock(fb3.clone()).unwrap(); - - let fb4 = factory.flashblock_for_next_block(&fb3).build(); - manager.insert_flashblock(fb4).unwrap(); - - // Cache size is 3, so block 100 should have been evicted. - assert_eq!(manager.earliest_block_number(), Some(101)); - assert_eq!(manager.latest_block_number(), Some(104)); - } - - // ==================== Speculative Building Tests ==================== - - #[test] - fn test_speculative_build_with_pending_parent_state() { - use crate::pending_state::PendingBlockState; - use reth_execution_types::BlockExecutionOutput; - use reth_revm::cached::CachedReads; - use std::sync::Arc; - - let mut manager: SequenceManager = SequenceManager::new(true); - let factory = TestFlashBlockFactory::new(); - - // Create a flashblock for block 101 - let fb0 = factory.flashblock_at(0).block_number(101).build(); - // The parent_hash of block 101 should be the hash of block 100 - let block_100_hash = fb0.base.as_ref().unwrap().parent_hash; - manager.insert_flashblock(fb0).unwrap(); - - // Local tip is block 99 (not matching block 100's hash) - let local_tip_hash = B256::random(); - - // Without pending parent state, no args should be returned - let args = manager.next_buildable_args::(local_tip_hash, 1000000, None); - assert!(args.is_none()); - - // Create pending parent state for block 100 (its block_hash matches fb0's parent_hash) - let parent_hash = B256::random(); - let pending_state: PendingBlockState = PendingBlockState { - block_hash: 
block_100_hash, - block_number: 100, - parent_hash, - canonical_anchor_hash: parent_hash, - execution_outcome: Arc::new(BlockExecutionOutput::default()), - cached_reads: CachedReads::default(), - sealed_header: None, - }; - - // With pending parent state, should return args for speculative building - let args = manager.next_buildable_args(local_tip_hash, 1000000, Some(pending_state)); - assert!(args.is_some()); - let build_args = args.unwrap(); - assert!(build_args.pending_parent.is_some()); - assert_eq!(build_args.pending_parent.as_ref().unwrap().block_number, 100); - } - - #[test] - fn test_speculative_build_uses_cached_sequence() { - use crate::pending_state::PendingBlockState; - use reth_execution_types::BlockExecutionOutput; - use reth_revm::cached::CachedReads; - use std::sync::Arc; - - let mut manager: SequenceManager = SequenceManager::new(true); - let factory = TestFlashBlockFactory::new(); - - // Create and cache first sequence for block 100 - let fb0 = factory.flashblock_at(0).build(); - let block_99_hash = fb0.base.as_ref().unwrap().parent_hash; - manager.insert_flashblock(fb0.clone()).unwrap(); - - // Create second sequence for block 101 (this caches block 100) - let fb1 = factory.flashblock_for_next_block(&fb0).build(); - manager.insert_flashblock(fb1.clone()).unwrap(); - - // Create third sequence for block 102 (this caches block 101) - let fb2 = factory.flashblock_for_next_block(&fb1).build(); - manager.insert_flashblock(fb2).unwrap(); - - // Local tip is some random hash (not matching any sequence parent) - let local_tip_hash = B256::random(); - - // Create pending parent state that matches the cached block 100 sequence's parent - let parent_hash = B256::random(); - let pending_state: PendingBlockState = PendingBlockState { - block_hash: block_99_hash, - block_number: 99, - parent_hash, - canonical_anchor_hash: parent_hash, - execution_outcome: Arc::new(BlockExecutionOutput::default()), - cached_reads: CachedReads::default(), - sealed_header: None, 
- }; - - // Should find cached sequence for block 100 (whose parent is block_99_hash) - let args = manager.next_buildable_args(local_tip_hash, 1000000, Some(pending_state)); - assert!(args.is_some()); - let build_args = args.unwrap(); - assert!(build_args.pending_parent.is_some()); - assert_eq!(build_args.base.block_number, 100); - } - - #[test] - fn test_canonical_build_takes_priority_over_speculative() { - use crate::pending_state::PendingBlockState; - use reth_execution_types::BlockExecutionOutput; - use reth_revm::cached::CachedReads; - use std::sync::Arc; - - let mut manager: SequenceManager = SequenceManager::new(true); - let factory = TestFlashBlockFactory::new(); - - // Create a flashblock for block 100 - let fb0 = factory.flashblock_at(0).build(); - let parent_hash = fb0.base.as_ref().unwrap().parent_hash; - manager.insert_flashblock(fb0).unwrap(); - - // Create pending parent state with a different block hash - let pending_parent_hash = B256::random(); - let pending_state: PendingBlockState = PendingBlockState { - block_hash: B256::repeat_byte(0xAA), - block_number: 99, - parent_hash: pending_parent_hash, - canonical_anchor_hash: pending_parent_hash, - execution_outcome: Arc::new(BlockExecutionOutput::default()), - cached_reads: CachedReads::default(), - sealed_header: None, - }; - - // Local tip matches the sequence parent (canonical mode should take priority) - let args = manager.next_buildable_args(parent_hash, 1000000, Some(pending_state)); - assert!(args.is_some()); - let build_args = args.unwrap(); - // Should be canonical build (no pending_parent) - assert!(build_args.pending_parent.is_none()); - } - - // ==================== Reconciliation Cache Clearing Tests ==================== - - #[test] - fn test_catchup_clears_all_cached_sequences() { - let mut manager: SequenceManager = SequenceManager::new(true); - let factory = TestFlashBlockFactory::new(); - - // Build up cached sequences for blocks 100, 101, 102 - let fb0 = 
factory.flashblock_at(0).build(); - manager.insert_flashblock(fb0.clone()).unwrap(); - - let fb1 = factory.flashblock_for_next_block(&fb0).build(); - manager.insert_flashblock(fb1.clone()).unwrap(); - - let fb2 = factory.flashblock_for_next_block(&fb1).build(); - manager.insert_flashblock(fb2).unwrap(); - - // Verify we have cached sequences - assert_eq!(manager.completed_cache.len(), 2); - assert!(manager.pending().block_number().is_some()); - - // Canonical catches up to 102 - should clear everything - let canonical = canonical_for(&manager, 102, vec![]); - let strategy = manager.process_canonical_block(canonical, 10); - assert_eq!(strategy, ReconciliationStrategy::CatchUp); - - // Verify all state is cleared - assert!(manager.pending().block_number().is_none()); - assert_eq!(manager.completed_cache.len(), 0); - } - - #[test] - fn test_reorg_clears_all_cached_sequences() { - let mut manager: SequenceManager = SequenceManager::new(true); - let factory = TestFlashBlockFactory::new(); - - // Build pending sequence for block 100 - let fb0 = factory.flashblock_at(0).build(); - manager.insert_flashblock(fb0.clone()).unwrap(); - - // Add another sequence - let fb1 = factory.flashblock_for_next_block(&fb0).build(); - manager.insert_flashblock(fb1).unwrap(); - - // Verify we have state - assert!(manager.pending().block_number().is_some()); - assert!(!manager.completed_cache.is_empty()); - - // Simulate reorg at block 100: canonical has different tx than our cached - // We need to insert a tx in the sequence to make reorg detection work - // The reorg detection compares our pending transactions vs canonical - // Since we have no pending transactions (TestFlashBlockFactory creates empty tx lists), - // we need to use a different approach - process with tx hashes that don't match empty - - // Actually, let's verify the state clearing on HandleReorg by checking - // that any non-empty canonical_tx_hashes when we have state triggers reorg - let canonical_tx_hashes = 
vec![B256::repeat_byte(0xAA)]; - let canonical = canonical_for(&manager, 100, canonical_tx_hashes); - let strategy = manager.process_canonical_block(canonical, 10); - - // Should detect reorg (canonical has txs, we have none for that block) - assert_eq!(strategy, ReconciliationStrategy::HandleReorg); - - // Verify all state is cleared - assert!(manager.pending().block_number().is_none()); - assert_eq!(manager.completed_cache.len(), 0); - } - - #[test] - fn test_depth_limit_exceeded_clears_all_state() { - let mut manager: SequenceManager = SequenceManager::new(true); - let factory = TestFlashBlockFactory::new(); - - // Build sequences for blocks 100-102 - let fb0 = factory.flashblock_at(0).build(); - manager.insert_flashblock(fb0.clone()).unwrap(); - - let fb1 = factory.flashblock_for_next_block(&fb0).build(); - manager.insert_flashblock(fb1.clone()).unwrap(); - - let fb2 = factory.flashblock_for_next_block(&fb1).build(); - manager.insert_flashblock(fb2).unwrap(); - - // Verify state exists - assert_eq!(manager.earliest_block_number(), Some(100)); - assert_eq!(manager.latest_block_number(), Some(102)); - - // Canonical at 101 with max_depth of 0 (depth = 101 - 100 = 1 > 0) - // Since canonical < latest (102), this should trigger depth limit exceeded - let canonical = canonical_for(&manager, 101, vec![]); - let strategy = manager.process_canonical_block(canonical, 0); - assert!(matches!(strategy, ReconciliationStrategy::DepthLimitExceeded { .. 
})); - - // Verify all state is cleared - assert!(manager.pending().block_number().is_none()); - assert_eq!(manager.completed_cache.len(), 0); - } - - #[test] - fn test_continue_preserves_all_state() { - let mut manager: SequenceManager = SequenceManager::new(true); - let factory = TestFlashBlockFactory::new(); - - // Build sequences for blocks 100-102 - let fb0 = factory.flashblock_at(0).build(); - manager.insert_flashblock(fb0.clone()).unwrap(); - - let fb1 = factory.flashblock_for_next_block(&fb0).build(); - manager.insert_flashblock(fb1.clone()).unwrap(); - - let fb2 = factory.flashblock_for_next_block(&fb1).build(); - manager.insert_flashblock(fb2).unwrap(); - - let cached_count = manager.completed_cache.len(); - - // Canonical at 99 (behind pending) with reasonable depth limit - let canonical = canonical_for(&manager, 99, vec![]); - let strategy = manager.process_canonical_block(canonical, 10); - assert_eq!(strategy, ReconciliationStrategy::Continue); - - // Verify state is preserved - assert_eq!(manager.pending().block_number(), Some(102)); - assert_eq!(manager.completed_cache.len(), cached_count); - } - - #[test] - fn test_clear_all_removes_pending_and_cache() { - let mut manager: SequenceManager = SequenceManager::new(true); - let factory = TestFlashBlockFactory::new(); - - // Build up state - let fb0 = factory.flashblock_at(0).build(); - manager.insert_flashblock(fb0.clone()).unwrap(); - - let fb1 = factory.flashblock_for_next_block(&fb0).build(); - manager.insert_flashblock(fb1).unwrap(); - - // Verify state exists - assert!(manager.pending().block_number().is_some()); - assert!(!manager.completed_cache.is_empty()); - assert!(manager.pending_transaction_count() > 0 || manager.pending().count() > 0); - - // Clear via catchup - let canonical = canonical_for(&manager, 101, vec![]); - manager.process_canonical_block(canonical, 10); - - // Verify complete clearing - assert!(manager.pending().block_number().is_none()); - assert_eq!(manager.pending().count(), 
0); - assert!(manager.completed_cache.is_empty()); - assert_eq!(manager.pending_transaction_count(), 0); - } - - // ==================== Tracked Fingerprint Tests ==================== - - #[test] - fn test_tracked_fingerprint_returns_none_for_unknown_block() { - let manager: SequenceManager = SequenceManager::new(true); - - // No flashblocks inserted, should return none - let fingerprint = manager.tracked_fingerprint_for_block(100); - assert!(fingerprint.is_none()); - } - - #[test] - fn test_no_false_reorg_for_untracked_block() { - let mut manager: SequenceManager = SequenceManager::new(true); - let factory = TestFlashBlockFactory::new(); - - // Build pending sequence for block 100 - let fb0 = factory.flashblock_at(0).build(); - manager.insert_flashblock(fb0.clone()).unwrap(); - - // Add another sequence for block 101 - let fb1 = factory.flashblock_for_next_block(&fb0).build(); - manager.insert_flashblock(fb1).unwrap(); - - // Verify we have state for blocks 100 (cached) and 101 (pending) - assert_eq!(manager.earliest_block_number(), Some(100)); - assert_eq!(manager.latest_block_number(), Some(101)); - - // Process canonical block 99 (not tracked) with transactions - // This should NOT trigger reorg detection because we don't track block 99 - let canonical_tx_hashes = vec![B256::repeat_byte(0xAA)]; - let canonical = canonical_for(&manager, 99, canonical_tx_hashes); - let strategy = manager.process_canonical_block(canonical, 10); - - // Should continue (not reorg) because block 99 is outside our tracked window - assert_eq!(strategy, ReconciliationStrategy::Continue); - - // State should be preserved - assert_eq!(manager.pending().block_number(), Some(101)); - assert!(!manager.completed_cache.is_empty()); - } - - #[test] - fn test_reorg_detected_for_tracked_block_with_different_txs() { - let mut manager: SequenceManager = SequenceManager::new(true); - let factory = TestFlashBlockFactory::new(); - - // Build pending sequence for block 100 - let fb0 = 
factory.flashblock_at(0).build(); - manager.insert_flashblock(fb0.clone()).unwrap(); - - // Add another sequence for block 101 - let fb1 = factory.flashblock_for_next_block(&fb0).build(); - manager.insert_flashblock(fb1).unwrap(); - - // Process canonical block 100 (which IS tracked) with different transactions - // Our tracked block 100 has empty tx list, canonical has non-empty - let canonical_tx_hashes = vec![B256::repeat_byte(0xAA)]; - let canonical = canonical_for(&manager, 100, canonical_tx_hashes); - let strategy = manager.process_canonical_block(canonical, 10); - - // Should detect reorg because we track block 100 and txs don't match - assert_eq!(strategy, ReconciliationStrategy::HandleReorg); - - // State should be cleared - assert!(manager.pending().block_number().is_none()); - assert!(manager.completed_cache.is_empty()); - } - - #[test] - fn test_reorg_detected_for_tracked_block_with_parent_hash_mismatch() { - let mut manager: SequenceManager = SequenceManager::new(true); - let factory = TestFlashBlockFactory::new(); - - // Build pending sequence for block 100 and cache it by starting block 101. - let fb0 = factory.flashblock_at(0).build(); - manager.insert_flashblock(fb0.clone()).unwrap(); - let fb1 = factory.flashblock_for_next_block(&fb0).build(); - manager.insert_flashblock(fb1).unwrap(); - - let tracked = manager - .tracked_fingerprint_for_block(100) - .expect("tracked fingerprint for block 100 should exist"); - let canonical = CanonicalBlockFingerprint { - block_number: 100, - block_hash: tracked.block_hash, - parent_hash: B256::repeat_byte(0xAA), // Different parent hash, identical txs. 
- tx_hashes: tracked.tx_hashes, - }; - - let strategy = manager.process_canonical_block(canonical, 10); - assert_eq!(strategy, ReconciliationStrategy::HandleReorg); - assert!(manager.pending().block_number().is_none()); - assert!(manager.completed_cache.is_empty()); - } - - #[test] - fn test_reorg_detected_for_tracked_block_with_block_hash_mismatch() { - let mut manager: SequenceManager = SequenceManager::new(true); - let factory = TestFlashBlockFactory::new(); - - // Build pending sequence for block 100 and cache it by starting block 101. - let fb0 = factory.flashblock_at(0).build(); - manager.insert_flashblock(fb0.clone()).unwrap(); - let fb1 = factory.flashblock_for_next_block(&fb0).build(); - manager.insert_flashblock(fb1).unwrap(); - - let tracked = manager - .tracked_fingerprint_for_block(100) - .expect("tracked fingerprint for block 100 should exist"); - let canonical = CanonicalBlockFingerprint { - block_number: 100, - block_hash: B256::repeat_byte(0xBB), // Different block hash, identical parent+txs. 
- parent_hash: tracked.parent_hash, - tx_hashes: tracked.tx_hashes, - }; - - let strategy = manager.process_canonical_block(canonical, 10); - assert_eq!(strategy, ReconciliationStrategy::HandleReorg); - assert!(manager.pending().block_number().is_none()); - assert!(manager.completed_cache.is_empty()); - } - - #[test] - fn test_tracked_fingerprint_for_pending_block() { - let mut manager: SequenceManager = SequenceManager::new(true); - let factory = TestFlashBlockFactory::new(); - - // Create flashblock without transactions (empty tx list is valid) - let fb0 = factory.flashblock_at(0).build(); - manager.insert_flashblock(fb0).unwrap(); - - // Should find tracked fingerprint for block 100 - let fingerprint = manager.tracked_fingerprint_for_block(100); - assert!(fingerprint.is_some()); - assert!(fingerprint.unwrap().tx_hashes.is_empty()); // No transactions in this flashblock - } - - #[test] - fn test_tracked_fingerprint_for_cached_block() { - let mut manager: SequenceManager = SequenceManager::new(true); - let factory = TestFlashBlockFactory::new(); - - // Create first flashblock for block 100 - let fb0 = factory.flashblock_at(0).build(); - manager.insert_flashblock(fb0.clone()).unwrap(); - - // Create second flashblock for block 101 (caches block 100) - let fb1 = factory.flashblock_for_next_block(&fb0).build(); - manager.insert_flashblock(fb1).unwrap(); - - // Should find tracked fingerprint for cached block 100 - let fingerprint = manager.tracked_fingerprint_for_block(100); - assert!(fingerprint.is_some()); - assert!(fingerprint.as_ref().unwrap().tx_hashes.is_empty()); - - // Should find tracked fingerprint for pending block 101 - let fingerprint = manager.tracked_fingerprint_for_block(101); - assert!(fingerprint.is_some()); - assert!(fingerprint.as_ref().unwrap().tx_hashes.is_empty()); - } -} diff --git a/crates/flashblocks/src/cache/confirm.rs b/crates/flashblocks/src/cache/confirm.rs new file mode 100644 index 00000000..731c510a --- /dev/null +++ 
b/crates/flashblocks/src/cache/confirm.rs @@ -0,0 +1,599 @@ +use crate::CachedTxInfo; +use eyre::eyre; +use std::{ + collections::{BTreeMap, HashMap}, + sync::Arc, +}; + +use alloy_consensus::transaction::TxHashRef; +use alloy_primitives::{TxHash, B256}; + +use reth_chain_state::ExecutedBlock; +use reth_primitives_traits::{BlockBody, NodePrimitives, ReceiptTy}; +use reth_rpc_eth_types::block::BlockAndReceipts; + +const DEFAULT_CONFIRM_BLOCK_CACHE_SIZE: usize = 50; +const DEFAULT_TX_CACHE_SIZE: usize = DEFAULT_CONFIRM_BLOCK_CACHE_SIZE * 10_000; + +#[derive(Debug)] +pub(crate) struct ConfirmedBlock { + /// The locally built pending block with execution output. + pub(crate) executed_block: ExecutedBlock, + /// The receipts for the pending block + pub(crate) receipts: Arc>>, +} + +impl ConfirmedBlock { + /// Returns a pair of [`RecoveredBlock`] and a vector of [`NodePrimitives::Receipt`]s by + /// cloning from borrowed self. + pub(crate) fn to_block_and_receipts(&self) -> BlockAndReceipts { + BlockAndReceipts { + block: self.executed_block.recovered_block.clone(), + receipts: self.receipts.clone(), + } + } +} + +/// Confirmed flashblocks sequence cache that is ahead of the current node's canonical +/// chainstate. We optimistically commit confirmed flashblocks sequences to the cache +/// and flush them when the canonical chainstate catches up. +/// +/// Block data is stored in a `BTreeMap` keyed by block number, enabling O(log n) +/// range splits in [`flush_up_to`](Self::flush_up_to). +/// A secondary `HashMap` provides O(1) block hash to block number reverse lookups. +/// +/// Transaction data is stored in a `HashMap` which indexes transaction hashes to +/// [`CachedTxInfo`] for O(1) tx/receipt lookups. +#[derive(Debug)] +pub(crate) struct ConfirmCache { + /// Primary storage: block number → (block hash, block + receipts). + /// `BTreeMap` ordering enables efficient range-based flush via `split_off`. 
+ blocks: BTreeMap)>, + /// Reverse index: block hash → block number for O(1) hash-based lookups. + hash_to_number: HashMap, + /// Transaction index: tx hash → cached tx info for O(1) tx/receipt lookups. + tx_index: HashMap>, +} + +impl Default for ConfirmCache { + fn default() -> Self { + Self::new() + } +} + +impl ConfirmCache { + /// Creates a new [`ConfirmCache`]. + pub(crate) fn new() -> Self { + Self { + blocks: BTreeMap::new(), + hash_to_number: HashMap::with_capacity(DEFAULT_CONFIRM_BLOCK_CACHE_SIZE), + tx_index: HashMap::with_capacity(DEFAULT_TX_CACHE_SIZE), + } + } + + /// Inserts a confirmed block into the cache, indexed by block number and block hash. + pub(crate) fn insert( + &mut self, + height: u64, + executed_block: ExecutedBlock, + receipts: Arc>>, + ) -> eyre::Result<()> { + if self.blocks.len() >= DEFAULT_CONFIRM_BLOCK_CACHE_SIZE + && !self.blocks.contains_key(&height) + { + return Err(eyre!( + "confirm cache at max capacity ({DEFAULT_CONFIRM_BLOCK_CACHE_SIZE}), cannot insert block: {height}" + )); + } + if let Some((old_hash, old_block)) = self.blocks.remove(&height) { + // Clean up old entries at this height if exist + self.hash_to_number.remove(&old_hash); + self.remove_tx_index_for_block(&old_block); + } + + // Build tx index entries for all transactions in this block + let hash = executed_block.recovered_block.hash(); + let txs = executed_block.recovered_block.body().transactions(); + for (idx, (tx, receipt)) in txs.iter().zip(receipts.as_ref().iter()).enumerate() { + let tx_hash = *tx.tx_hash(); + self.tx_index.insert( + tx_hash, + CachedTxInfo { + block_number: height, + block_hash: hash, + tx_index: idx as u64, + tx: tx.clone(), + receipt: receipt.clone(), + }, + ); + } + // Build block index entries for block data + self.hash_to_number.insert(hash, height); + self.blocks.insert(height, (hash, ConfirmedBlock { executed_block, receipts })); + Ok(()) + } + + /// Clears all entries. 
+ pub(crate) fn clear(&mut self) { + self.tx_index.clear(); + self.blocks.clear(); + self.hash_to_number.clear(); + } + + /// Returns the block number for the given block hash, if cached. + pub(crate) fn number_for_hash(&self, block_hash: &B256) -> Option { + self.hash_to_number.get(block_hash).copied() + } + + /// Returns the confirmed block for the given block hash, if present. + pub(crate) fn get_block_by_hash(&self, block_hash: &B256) -> Option> { + self.get_block_by_number(self.number_for_hash(block_hash)?) + } + + /// Returns the confirmed block for the given block number, if present. + pub(crate) fn get_block_by_number(&self, block_number: u64) -> Option> { + self.blocks.get(&block_number).map(|(_, entry)| entry.to_block_and_receipts()) + } + + /// Returns the cached transaction info for the given tx hash, if present. + pub(crate) fn get_tx_info( + &self, + tx_hash: &TxHash, + ) -> Option<(CachedTxInfo, BlockAndReceipts)> { + let tx_info = self.tx_index.get(tx_hash).cloned()?; + let block = self.get_block_by_number(tx_info.block_number)?; + Some((tx_info, block)) + } + + /// Returns all `ExecutedBlock`s in the cache up to and including `target_height`, + /// ordered newest to oldest (for use with `MemoryOverlayStateProvider`). + /// + /// Returns an error if state cache pollution detected (non-contiguous blocks). 
+ pub(crate) fn get_executed_blocks_up_to_height( + &self, + target_height: u64, + canon_height: u64, + ) -> eyre::Result>> { + // Validation checks + let entries: Vec<_> = self.blocks.range(..=target_height).collect(); + if !entries.is_empty() { + // Verify lowest overlay block must be at most `canon_height + 1` to ensure + // no gap between canonical state and the overlay + let lowest = *entries[0].0; + if lowest > canon_height + 1 { + return Err(eyre!( + "gap between canonical height {canon_height} and lowest overlay block {lowest}" + )); + } + // Verify contiguity + for window in entries.windows(2) { + let (a, _) = window[0]; + let (b, _) = window[1]; + if *b != *a + 1 { + return Err(eyre!( + "non-contiguous confirm cache: gap between blocks {a} and {b}" + )); + } + } + } + Ok(entries + .into_iter() + .rev() + .map(|(_, (_, confirmed))| confirmed.executed_block.clone()) + .collect()) + } + + /// Returns the `ExecutedBlock` for the given block number, if present. + pub(crate) fn get_executed_block_by_number( + &self, + block_number: u64, + ) -> Option> { + self.blocks.get(&block_number).map(|(_, entry)| entry.executed_block.clone()) + } + + /// Removes all tx index entries for the transactions in the given block. + fn remove_tx_index_for_block(&mut self, block: &ConfirmedBlock) { + for tx in block.executed_block.recovered_block.body().transactions() { + self.tx_index.remove(tx.tx_hash()); + } + } + + /// Flushes all entries with block number <= `canonical_number`. + /// + /// Called when the canonical chain catches up to the confirmed cache. Returns + /// the number of entries flushed. 
+ pub(crate) fn flush_up_to_height(&mut self, canon_height: u64) -> usize { + let retained = self.blocks.split_off(&(canon_height + 1)); + let stale = std::mem::replace(&mut self.blocks, retained); + let count = stale.len(); + for (hash, bar) in stale.into_values() { + self.hash_to_number.remove(&hash); + self.remove_tx_index_for_block(&bar); + } + count + } + + /// Returns the number of cached entries. + #[cfg(test)] + pub(crate) fn len(&self) -> usize { + self.blocks.len() + } + + /// Returns `true` if the cache is empty. + #[cfg(test)] + pub(crate) fn is_empty(&self) -> bool { + self.blocks.is_empty() + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::test_utils::{empty_receipts, make_executed_block, make_executed_block_with_txs}; + use alloy_consensus::BlockHeader; + use reth_optimism_primitives::OpPrimitives; + + #[test] + fn test_confirm_cache_new_is_empty() { + let cache = ConfirmCache::::new(); + assert!(cache.is_empty()); + assert_eq!(cache.len(), 0); + } + + #[test] + fn test_confirm_cache_insert_single_block_increases_len() { + let mut cache = ConfirmCache::::new(); + let block = make_executed_block(1, B256::ZERO); + cache.insert(1, block, empty_receipts()).expect("insert should succeed"); + assert_eq!(cache.len(), 1); + assert!(!cache.is_empty()); + } + + #[test] + fn test_confirm_cache_insert_fails_at_max_capacity() { + let mut cache = ConfirmCache::::new(); + let mut parent = B256::ZERO; + for height in 1..=(DEFAULT_CONFIRM_BLOCK_CACHE_SIZE as u64) { + let block = make_executed_block(height, parent); + let hash = block.recovered_block.hash(); + cache.insert(height, block, empty_receipts()).expect("insert within capacity"); + parent = hash; + } + let overflow = make_executed_block(DEFAULT_CONFIRM_BLOCK_CACHE_SIZE as u64 + 1, parent); + let result = + cache.insert(DEFAULT_CONFIRM_BLOCK_CACHE_SIZE as u64 + 1, overflow, empty_receipts()); + assert!(result.is_err()); + } + + #[test] + fn 
test_confirm_cache_get_block_by_number_returns_correct_block() { + let mut cache = ConfirmCache::::new(); + let block = make_executed_block(42, B256::ZERO); + cache.insert(42, block, empty_receipts()).expect("insert"); + let result = cache.get_block_by_number(42); + assert!(result.is_some()); + assert_eq!(result.unwrap().block.number(), 42); + } + + #[test] + fn test_confirm_cache_get_block_by_number_returns_none_for_wrong_number() { + let mut cache = ConfirmCache::::new(); + let block = make_executed_block(42, B256::ZERO); + cache.insert(42, block, empty_receipts()).expect("insert"); + assert!(cache.get_block_by_number(43).is_none()); + assert!(cache.get_block_by_number(0).is_none()); + } + + #[test] + fn test_confirm_cache_get_block_by_hash_returns_correct_block() { + let mut cache = ConfirmCache::::new(); + let block = make_executed_block(42, B256::ZERO); + let block_hash = block.recovered_block.hash(); + cache.insert(42, block, empty_receipts()).expect("insert"); + let result = cache.get_block_by_hash(&block_hash); + assert!(result.is_some()); + assert_eq!(result.unwrap().block.number(), 42); + } + + #[test] + fn test_confirm_cache_get_block_by_hash_returns_none_for_unknown_hash() { + let mut cache = ConfirmCache::::new(); + let block = make_executed_block(42, B256::ZERO); + cache.insert(42, block, empty_receipts()).expect("insert"); + assert!(cache.get_block_by_hash(&B256::repeat_byte(0xFF)).is_none()); + } + + #[test] + fn test_confirm_cache_number_for_hash_returns_correct_mapping() { + let mut cache = ConfirmCache::::new(); + let block = make_executed_block(10, B256::ZERO); + let hash = block.recovered_block.hash(); + cache.insert(10, block, empty_receipts()).expect("insert"); + assert_eq!(cache.number_for_hash(&hash), Some(10)); + } + + #[test] + fn test_confirm_cache_clear_removes_all_entries() { + let mut cache = ConfirmCache::::new(); + let block = make_executed_block(1, B256::ZERO); + cache.insert(1, block, empty_receipts()).expect("insert"); + 
cache.clear(); + assert!(cache.is_empty()); + assert!(cache.get_block_by_number(1).is_none()); + } + + #[test] + fn test_confirm_cache_flush_up_to_height_removes_entries_at_or_below_height() { + let mut cache = ConfirmCache::::new(); + let mut parent = B256::ZERO; + for height in 1..=5 { + let block = make_executed_block(height, parent); + parent = block.recovered_block.hash(); + cache.insert(height, block, empty_receipts()).expect("insert"); + } + let count = cache.flush_up_to_height(3); + assert_eq!(count, 3); + assert_eq!(cache.len(), 2); + assert!(cache.get_block_by_number(3).is_none()); + assert!(cache.get_block_by_number(4).is_some()); + assert!(cache.get_block_by_number(5).is_some()); + } + + #[test] + fn test_confirm_cache_flush_up_to_height_higher_than_all_removes_all() { + let mut cache = ConfirmCache::::new(); + let mut parent = B256::ZERO; + for height in 1..=3 { + let block = make_executed_block(height, parent); + parent = block.recovered_block.hash(); + cache.insert(height, block, empty_receipts()).expect("insert"); + } + let count = cache.flush_up_to_height(100); + assert_eq!(count, 3); + assert!(cache.is_empty()); + } + + #[test] + fn test_confirm_cache_flush_up_to_height_zero_removes_nothing() { + let mut cache = ConfirmCache::::new(); + let block = make_executed_block(1, B256::ZERO); + cache.insert(1, block, empty_receipts()).expect("insert"); + let count = cache.flush_up_to_height(0); + assert_eq!(count, 0); + assert_eq!(cache.len(), 1); + } + + #[test] + fn test_confirm_cache_flush_removes_hash_indices_for_all_flushed_blocks() { + let mut cache = ConfirmCache::::new(); + let mut parent = B256::ZERO; + let mut hashes = vec![]; + for height in 1..=3 { + let block = make_executed_block(height, parent); + let hash = block.recovered_block.hash(); + hashes.push(hash); + cache.insert(height, block, empty_receipts()).expect("insert"); + parent = hash; + } + cache.flush_up_to_height(2); + assert!(cache.number_for_hash(&hashes[0]).is_none()); + 
assert!(cache.number_for_hash(&hashes[1]).is_none()); + assert!(cache.number_for_hash(&hashes[2]).is_some()); + } + + #[test] + fn test_confirm_cache_get_executed_blocks_up_to_height_returns_contiguous_blocks_newest_first() + { + let mut cache = ConfirmCache::::new(); + let block2 = make_executed_block(2, B256::repeat_byte(0x01)); + let block3 = make_executed_block(3, block2.recovered_block.hash()); + let block4 = make_executed_block(4, block3.recovered_block.hash()); + cache.insert(2, block2, empty_receipts()).expect("insert 2"); + cache.insert(3, block3, empty_receipts()).expect("insert 3"); + cache.insert(4, block4, empty_receipts()).expect("insert 4"); + let blocks = cache.get_executed_blocks_up_to_height(4, 1).unwrap(); + assert_eq!(blocks.len(), 3); + assert_eq!(blocks[0].recovered_block.number(), 4); + assert_eq!(blocks[1].recovered_block.number(), 3); + assert_eq!(blocks[2].recovered_block.number(), 2); + } + + #[test] + fn test_confirm_cache_get_executed_blocks_up_to_height_returns_empty_on_empty_cache() { + let cache = ConfirmCache::::new(); + let result = cache.get_executed_blocks_up_to_height(5, 1); + assert!(result.unwrap().is_empty()); + } + + #[test] + fn test_confirm_cache_get_executed_blocks_detects_gap_between_canonical_and_overlay() { + let mut cache = ConfirmCache::::new(); + let block3 = make_executed_block(3, B256::ZERO); + cache.insert(3, block3, empty_receipts()).expect("insert 3"); + assert!(cache.get_executed_blocks_up_to_height(3, 1).is_err()); + } + + #[test] + fn test_confirm_cache_get_executed_blocks_detects_non_contiguous_overlay() { + let mut cache = ConfirmCache::::new(); + let block2 = make_executed_block(2, B256::repeat_byte(0x01)); + let block4 = make_executed_block(4, B256::repeat_byte(0x03)); + cache.insert(2, block2, empty_receipts()).expect("insert 2"); + cache.insert(4, block4, empty_receipts()).expect("insert 4"); + assert!(cache.get_executed_blocks_up_to_height(4, 1).is_err()); + } + + #[test] + fn 
test_confirm_cache_get_executed_blocks_allows_redundant_overlap_with_canonical() { + let mut cache = ConfirmCache::::new(); + let block2 = make_executed_block(2, B256::repeat_byte(0x01)); + let block3 = make_executed_block(3, block2.recovered_block.hash()); + cache.insert(2, block2, empty_receipts()).expect("insert 2"); + cache.insert(3, block3, empty_receipts()).expect("insert 3"); + let blocks = cache.get_executed_blocks_up_to_height(3, 2).unwrap(); + assert_eq!(blocks.len(), 2); + } + + #[test] + fn test_confirm_cache_get_executed_blocks_single_block_contiguous_with_canonical() { + let mut cache = ConfirmCache::::new(); + let block5 = make_executed_block(5, B256::repeat_byte(0x04)); + cache.insert(5, block5, empty_receipts()).expect("insert 5"); + let blocks = cache.get_executed_blocks_up_to_height(5, 4).unwrap(); + assert_eq!(blocks.len(), 1); + assert_eq!(blocks[0].recovered_block.number(), 5); + } + + #[test] + fn test_confirm_cache_get_executed_blocks_returns_subset_up_to_target() { + let mut cache = ConfirmCache::::new(); + let block2 = make_executed_block(2, B256::repeat_byte(0x01)); + let block3 = make_executed_block(3, block2.recovered_block.hash()); + let block4 = make_executed_block(4, block3.recovered_block.hash()); + let block5 = make_executed_block(5, block4.recovered_block.hash()); + cache.insert(2, block2, empty_receipts()).expect("insert 2"); + cache.insert(3, block3, empty_receipts()).expect("insert 3"); + cache.insert(4, block4, empty_receipts()).expect("insert 4"); + cache.insert(5, block5, empty_receipts()).expect("insert 5"); + let blocks = cache.get_executed_blocks_up_to_height(3, 1).unwrap(); + assert_eq!(blocks.len(), 2); + assert_eq!(blocks[0].recovered_block.number(), 3); + assert_eq!(blocks[1].recovered_block.number(), 2); + } + + #[test] + fn test_confirm_cache_insert_same_height_twice_keeps_cache_len_at_one() { + let mut cache = ConfirmCache::::new(); + let block_a = make_executed_block(10, B256::ZERO); + let block_b = 
make_executed_block(10, B256::repeat_byte(0xFF)); + cache.insert(10, block_a, empty_receipts()).expect("first insert"); + cache.insert(10, block_b, empty_receipts()).expect("second insert"); + assert_eq!(cache.len(), 1); + } + + #[test] + fn test_confirm_cache_get_tx_info_returns_none_for_unknown_hash() { + let cache = ConfirmCache::::new(); + assert!(cache.get_tx_info(&B256::repeat_byte(0xAA)).is_none()); + } + + #[test] + fn test_confirm_cache_insert_builds_tx_index_correctly() { + let mut cache = ConfirmCache::::new(); + let (block, receipts) = make_executed_block_with_txs(1, B256::ZERO, 0, 3); + let block_hash = block.recovered_block.hash(); + let tx_hashes: Vec<_> = + block.recovered_block.body().transactions().map(|tx| tx.tx_hash()).collect(); + cache.insert(1, block, receipts).expect("insert"); + + for (i, tx_hash) in tx_hashes.iter().enumerate() { + let (info, bar) = cache.get_tx_info(tx_hash).expect("tx should be in tx_index"); + assert_eq!(info.block_number, 1); + assert_eq!(info.block_hash, block_hash); + assert_eq!(info.tx_index, i as u64); + assert_eq!(bar.block.number(), 1); + } + } + + #[test] + fn test_confirm_cache_flush_cleans_tx_index() { + let mut cache = ConfirmCache::::new(); + let (block, receipts) = make_executed_block_with_txs(1, B256::ZERO, 0, 2); + let tx_hashes: Vec<_> = + block.recovered_block.body().transactions().map(|tx| tx.tx_hash()).collect(); + cache.insert(1, block, receipts).expect("insert"); + + cache.flush_up_to_height(1); + for tx_hash in tx_hashes.iter() { + assert!(cache.get_tx_info(tx_hash).is_none()); + } + } + + #[test] + fn test_confirm_cache_insert_duplicate_height_cleans_stale_indexes() { + let mut cache = ConfirmCache::::new(); + let block_a = make_executed_block(10, B256::ZERO); + let hash_a = block_a.recovered_block.hash(); + let block_b = make_executed_block(10, B256::repeat_byte(0xFF)); + let hash_b = block_b.recovered_block.hash(); + + cache.insert(10, block_a, empty_receipts()).expect("first insert"); + 
cache.insert(10, block_b, empty_receipts()).expect("second insert"); + + assert_eq!(cache.number_for_hash(&hash_b), Some(10)); + // Stale hash_to_number entry is cleaned up on overwrite. + assert_eq!( + cache.number_for_hash(&hash_a), + None, + "stale hash_to_number entry should be removed on duplicate height insert" + ); + } + + #[test] + fn test_confirm_cache_insert_duplicate_height_retains_shared_tx_entries() { + // Two blocks at the same height share a transaction (same nonce → same hash). + // After replacing, the shared tx must still be present in the index. + let mut cache = ConfirmCache::::new(); + // block_a has txs with nonces [0, 1] + let (block_a, receipts_a) = make_executed_block_with_txs(10, B256::ZERO, 0, 2); + let shared_tx_hash: TxHash = + (*block_a.recovered_block.body().transactions().next().unwrap().tx_hash()).into(); + // block_b has txs with nonces [0, 2] — nonce 0 is shared with block_a + let (block_b, receipts_b) = make_executed_block_with_txs(10, B256::repeat_byte(0xFF), 0, 2); + let block_b_tx_hashes: Vec = block_b + .recovered_block + .body() + .transactions() + .map(|tx| (*tx.tx_hash()).into()) + .collect(); + + cache.insert(10, block_a, receipts_a).expect("first insert"); + assert!(cache.get_tx_info(&shared_tx_hash).is_some()); + + cache.insert(10, block_b, receipts_b).expect("second insert"); + // The shared tx (nonce 0) must still be in the index, pointing to block_b + let info = cache.get_tx_info(&shared_tx_hash); + assert!(info.is_some(), "shared tx should be retained after replacement"); + // All block_b txs should be present + for tx_hash in &block_b_tx_hashes { + assert!(cache.get_tx_info(tx_hash).is_some(), "block_b tx should be in index"); + } + } + + #[test] + fn test_confirm_cache_flush_cleans_tx_index_for_partial_flush() { + let mut cache = ConfirmCache::::new(); + let (block1, receipts1) = make_executed_block_with_txs(1, B256::ZERO, 0, 2); + let tx_hashes_1: Vec<_> = + block1.recovered_block.body().transactions().map(|tx| 
tx.tx_hash()).collect(); + let parent = block1.recovered_block.hash(); + cache.insert(1, block1, receipts1).expect("insert 1"); + + let (block2, receipts2) = make_executed_block_with_txs(2, parent, 100, 2); + let tx_hashes_2: Vec<_> = + block2.recovered_block.body().transactions().map(|tx| tx.tx_hash()).collect(); + cache.insert(2, block2, receipts2).expect("insert 2"); + + cache.flush_up_to_height(1); + for tx_hash in tx_hashes_1.iter() { + assert!(cache.get_tx_info(tx_hash).is_none(), "block 1 tx should be gone"); + } + for tx_hash in tx_hashes_2.iter() { + assert!(cache.get_tx_info(tx_hash).is_some(), "block 2 tx should remain"); + } + } + + #[test] + fn test_confirm_cache_clear_cleans_tx_index() { + let mut cache = ConfirmCache::::new(); + let (block, receipts) = make_executed_block_with_txs(1, B256::ZERO, 0, 2); + let tx_hashes: Vec<_> = + block.recovered_block.body().transactions().map(|tx| tx.tx_hash()).collect(); + cache.insert(1, block, receipts).expect("insert"); + + cache.clear(); + for tx_hash in tx_hashes.iter() { + assert!(cache.get_tx_info(tx_hash).is_none()); + } + } +} diff --git a/crates/flashblocks/src/cache/mod.rs b/crates/flashblocks/src/cache/mod.rs new file mode 100644 index 00000000..ff52ef21 --- /dev/null +++ b/crates/flashblocks/src/cache/mod.rs @@ -0,0 +1,602 @@ +mod confirm; +pub mod pending; +pub(crate) mod raw; +pub(crate) mod utils; + +pub(crate) use confirm::ConfirmCache; +pub(crate) use raw::RawFlashblocksCache; + +pub use pending::PendingSequence; + +use crate::PendingSequenceRx; +use parking_lot::RwLock; +use std::sync::Arc; +use tokio::sync::watch; +use tracing::*; + +use alloy_consensus::BlockHeader; +use alloy_primitives::{TxHash, B256}; +use alloy_rpc_types_eth::{BlockId, BlockNumberOrTag}; + +use reth_chain_state::{CanonicalInMemoryState, ExecutedBlock, MemoryOverlayStateProvider}; +use reth_primitives_traits::{NodePrimitives, ReceiptTy, SealedHeaderFor}; +use reth_rpc_eth_types::block::BlockAndReceipts; +use 
reth_storage_api::StateProviderBox; +use reth_trie_db::ChangesetCache; + +/// The minimum number of blocks to retain in the changeset cache after eviction. +/// +/// This ensures that recent trie changesets are kept in memory for potential reorgs, +/// even when the finalized block is not set (e.g., on L2s like Optimism). +const CHANGESET_CACHE_RETENTION_BLOCKS: u64 = 64; + +/// Cached transaction info (block context, receipt and tx data) for O(1) lookups +/// by transaction hash. +#[derive(Debug, Clone)] +pub struct CachedTxInfo { + /// Block number containing the transaction. + pub block_number: u64, + /// Block hash containing the transaction. + pub block_hash: B256, + /// Index of the transaction within the block. + pub tx_index: u64, + /// The signed transaction. + pub tx: N::SignedTx, + /// The corresponding receipt. + pub receipt: ReceiptTy, +} + +/// Top-level controller state cache for the flashblocks RPC layer. +/// +/// Pure data store composed of: +/// - **Pending**: the in-progress flashblock sequence being built from incoming +/// `OpFlashblockPayload` deltas (at most one active sequence at a time). +/// - **Confirmed**: completed flashblock sequences that have been committed but +/// are still ahead of the canonical chain. +/// +/// This cache is a **data source** — it does not wrap a provider or implement +/// any reth provider traits. The RPC override handler decides when to query +/// this cache vs the underlying chainstate provider. +/// +/// Uses `Arc` for thread safety — a single lock protects all inner +/// state, ensuring atomic operations across pending, confirmed, and height +/// state (e.g. reorg detection + flush + insert in `handle_confirmed_block`). +#[derive(Debug, Clone)] +pub struct FlashblockStateCache { + inner: Arc>>, + changeset_cache: ChangesetCache, + pub(crate) canon_in_memory_state: CanonicalInMemoryState, +} + +// FlashblockStateCache read interfaces +impl FlashblockStateCache { + /// Creates a new [`FlashblockStateCache`]. 
+ pub fn new(canon_in_memory_state: CanonicalInMemoryState) -> Self { + Self { + inner: Arc::new(RwLock::new(FlashblockStateCacheInner::new())), + changeset_cache: ChangesetCache::new(), + canon_in_memory_state, + } + } +} + +// FlashblockStateCache read height interfaces +impl FlashblockStateCache { + /// Returns the changeset cache. + pub fn get_changeset_cache(&self) -> ChangesetCache { + self.changeset_cache.clone() + } + + /// Returns the current confirmed height. + pub fn get_confirm_height(&self) -> u64 { + self.inner.read().confirm_height + } + + /// Return the current canonical height, if any. + pub fn get_canon_height(&self) -> u64 { + self.inner.read().canon_info.0 + } + + /// Returns a clone of the current pending sequence, if any. + pub fn get_pending_sequence(&self) -> Option> { + self.inner.read().pending_cache.clone() + } + + pub fn get_rpc_block_by_id(&self, block_id: Option) -> Option> { + match block_id.unwrap_or(BlockId::Number(BlockNumberOrTag::Latest)) { + BlockId::Number(id) => self.get_rpc_block(id), + BlockId::Hash(hash) => self.get_block_by_hash(&hash.block_hash), + } + } + + /// Returns the current pending block and receipts, if any. + pub fn get_rpc_block(&self, block_id: BlockNumberOrTag) -> Option> { + match block_id { + BlockNumberOrTag::Pending => self.inner.read().get_pending_block(), + BlockNumberOrTag::Latest => self.inner.read().get_confirmed_block(), + BlockNumberOrTag::Number(num) => self.get_block_by_number(num), + _ => None, + } + } + + /// Returns the block for the given block number, if cached. + pub fn get_block_by_number(&self, num: u64) -> Option> { + self.inner.read().get_block_by_number(num) + } + + /// Returns the confirmed block for the given block hash, if cached. + pub fn get_block_by_hash(&self, hash: &B256) -> Option> { + self.inner.read().get_block_by_hash(hash) + } + + /// Looks up cached transaction info by hash: pending sequence first, then + /// confirm cache. 
Returns `None` if the tx is not in either cache layer. + pub fn get_tx_info(&self, tx_hash: &TxHash) -> Option<(CachedTxInfo, BlockAndReceipts)> { + self.inner.read().get_tx_info(tx_hash) + } + + /// Returns a cloned watch receiver for pending sequence state updates. + pub fn subscribe_pending_sequence(&self) -> PendingSequenceRx { + self.inner.read().subscribe_pending_sequence() + } + + /// Instantiates a `MemoryOverlayStateProvider` that overlays the flashblock + /// execution state on top of the canonical state for the given block ID. + /// + /// 1. Block number/hash - all block overlays in the cache up to that block. + /// 2. `Pending` - all block overlays in the flashblocks state cache, which + /// includes the current pending executed block state. + /// 3. `Latest` - all block overlays in the confirm cache up to the confirm + /// height. + /// + /// Returns `None` if the target block is not in the flashblocks cache. + pub fn get_state_provider_by_id( + &self, + block_id: Option, + canonical_state: StateProviderBox, + ) -> Option<(StateProviderBox, SealedHeaderFor)> { + let guard = self.inner.read(); + let block = match block_id.unwrap_or(BlockId::Number(BlockNumberOrTag::Latest)) { + BlockId::Number(id) => match id { + BlockNumberOrTag::Pending => guard.get_pending_block(), + BlockNumberOrTag::Latest => guard.get_confirmed_block(), + BlockNumberOrTag::Number(num) => guard.get_block_by_number(num), + _ => None, + }, + BlockId::Hash(hash) => guard.get_block_by_hash(&hash.block_hash), + }? + .block; + let block_num = block.number(); + let in_memory = guard.get_executed_blocks_up_to_height(block_num); + drop(guard); + + let in_memory = match in_memory { + Ok(blocks) => blocks, + Err(e) => { + // Flush as the overlay is non-contiguous, indicating potential poluuted state. + warn!(target: "flashblocks", "Failed to get flashblocks state provider: {e}. 
Flushing cache"); + self.inner.write().flush(); + None + } + }?; + Some(( + Box::new(MemoryOverlayStateProvider::new(canonical_state, in_memory)), + block.clone_sealed_header(), + )) + } + + /// Instantiates a `MemoryOverlayStateProvider` with all block overlays in + /// the flashblocks state cache, including the current pending executed + /// block state. + pub fn get_pending_state_provider( + &self, + canonical_state: StateProviderBox, + ) -> Option<(StateProviderBox, SealedHeaderFor)> { + let guard = self.inner.read(); + let block = guard.get_pending_block()?.block; + let block_num = block.number(); + let in_memory = guard.get_executed_blocks_up_to_height(block_num); + drop(guard); + + let in_memory = match in_memory { + Ok(blocks) => blocks, + Err(e) => { + // Flush as the overlay is non-contiguous, indicating potential poluuted state. + warn!(target: "flashblocks", "Failed to get flashblocks state provider: {e}. Flushing cache"); + self.inner.write().flush(); + None + } + }?; + Some(( + Box::new(MemoryOverlayStateProvider::new(canonical_state, in_memory)), + block.clone_sealed_header(), + )) + } + + /// Returns all overlay blocks for the given hash, spanning from the + /// persisted on-disk anchor up through the flashblocks state cache. + /// + /// Overlay blocks are collected newest-to-oldest from two layers: + /// 1. **Flashblocks state cache** — pending + confirmed blocks + /// 2. **Engine canonical in-memory state** — blocks committed to the + /// canonical chain but not yet persisted to disk + /// + /// Returns the overlay blocks, sealed header of the requested block, + /// and the on-disk anchor hash. Returns `Ok(None)` if the block is + /// not found in either layer (i.e. it is fully persisted on disk). + #[expect(clippy::type_complexity)] + pub fn get_overlay_data( + &self, + block_hash: &B256, + ) -> eyre::Result>, SealedHeaderFor, B256)>> { + // 1. 
Retrieve flashblocks state cache overlay + let guard = self.inner.read(); + let mut header = + guard.get_block_by_hash(block_hash).map(|bar| bar.block.clone_sealed_header()); + let mut overlay = if let Some(ref h) = header { + let block_num = h.number(); + guard.get_executed_blocks_up_to_height(block_num)?.unwrap_or_default() + } else { + Vec::new() + }; + let canon_hash = guard.get_canon_info().1; + drop(guard); + + // 2. Retrieve engine canonical in-memory blocks + let anchor_hash = + if let Some(block_state) = self.canon_in_memory_state.state_by_hash(canon_hash) { + let anchor = block_state.anchor(); + if header.is_none() { + header = block_state + .chain() + .find(|s| s.hash() == *block_hash) + .map(|s| s.block_ref().recovered_block().sealed_header().clone()) + } + overlay.extend(block_state.chain().map(|s| s.block())); + anchor.hash + } else { + canon_hash + }; + + if overlay.is_empty() || header.is_none() { + // Block hash not found, already persisted to disk + return Ok(None); + } + Ok(Some((overlay, header.expect("valid cached header"), anchor_hash))) + } + + /// Returns the `ExecutedBlock` for the given block number from pending or confirm cache. + /// Used for diagnostic comparison with the engine's execution. + pub fn debug_get_executed_block_by_number( + &self, + block_number: u64, + ) -> Option> { + let guard = self.inner.read(); + if let Some(seq) = guard.pending_cache.as_ref() + && seq.get_height() == block_number + && seq.is_target_flashblock() + { + return Some(seq.pending.executed_block.clone()); + } + guard.confirm_cache.get_executed_block_by_number(block_number) + } +} + +// FlashblockStateCache state mutation interfaces. +impl FlashblockStateCache { + /// Handles updating the latest pending state by the flashblocks rpc handle. 
+ /// + /// This method detects when the flashblocks sequencer has advanced to the next + /// pending sequence height, and optimistically commits the current pending + /// sequence to the confirm cache before advancing the pending tip. + /// + /// If the pending sequence to be updated is the same as the current pending + /// sequence, it will replace the existing with the incoming pending sequence. + /// + /// Note that this state update is fallible if something goes really wrong here + /// as it detects potential reorgs and flashblocks state cache pollution. An entry + /// is invalidated if the incoming pending sequence height is not the next pending + /// height or current pending height. + pub fn handle_pending_sequence( + &self, + pending_sequence: PendingSequence, + ) -> eyre::Result<()> { + self.inner.write().handle_pending_sequence(pending_sequence) + } + + /// Handles a canonical block committed to the canonical chainstate. + /// + /// This method will flush the confirm cache up to the canonical block height and + /// commits the pending state to the confirm cache if it matches the committed block. + /// This ensures that the flashblocks state cache memory does not grow unbounded. + /// + /// It also detects chainstate re-orgs (set with re-org arg flag) and flashblocks + /// state cache pollution. By default once error is detected, we will automatically + /// flush the flashblocks state cache. + pub fn handle_canonical_block(&self, canon_info: (u64, B256), reorg: bool) -> bool { + debug!( + target: "flashblocks", + canonical_height = canon_info.0, + "Flashblocks state cache received canonical block" + ); + + // Evict trie changesets for blocks below the eviction threshold. + // Keep at least CHANGESET_CACHE_RETENTION_BLOCKS from the persisted tip, and also respect + // the finalized block if set. 
+ let eviction_threshold = canon_info.0.saturating_sub(CHANGESET_CACHE_RETENTION_BLOCKS); + debug!( + target: "flashblocks", + canonical_height = canon_info.0, + eviction_threshold = eviction_threshold, + "Evicting changesets below threshold" + ); + self.changeset_cache.evict(eviction_threshold); + self.inner.write().handle_canonical_block(canon_info, reorg) + } +} + +/// Inner state of the flashblocks state cache. +#[derive(Debug)] +struct FlashblockStateCacheInner { + /// The current in-progress pending flashblock sequence, if any. + pending_cache: Option>, + /// Cache of confirmed flashblock sequences ahead of the canonical chain. + confirm_cache: ConfirmCache, + /// Highest confirmed block height in the confirm cache. If flashblocks state cache + /// is uninitialized, the confirm height is set to 0. + confirm_height: u64, + /// Highest confirmed block height in the canonical chainstate. + canon_info: (u64, B256), + /// Receiver of the most recent executed [`PendingSequence`] built from the latest + /// flashblocks sequence. + pending_sequence_rx: PendingSequenceRx, + /// Sender of the most recent executed [`PendingSequence`] built from the latest + /// flashblocks sequence. + pending_sequence_tx: watch::Sender>>, +} + +impl FlashblockStateCacheInner { + fn new() -> Self { + let (tx, rx) = watch::channel(None); + + Self { + pending_cache: None, + confirm_cache: ConfirmCache::new(), + confirm_height: 0, + canon_info: (0, B256::ZERO), + pending_sequence_rx: rx, + pending_sequence_tx: tx, + } + } + + fn flush(&mut self) { + warn!(target: "flashblocks", "Flushing flashblocks state cache"); + self.pending_cache = None; + self.confirm_cache.clear(); + self.confirm_height = self.canon_info.0; + } + + /// Handles flushing a newly confirmed block to the confirm cache. Note that + /// this state update is fallible as it detects potential reorgs, and triggers + /// cache flush on invalidate entries. + /// + /// An entry is invalidated if: + /// 1. 
Block height to be is lower than the cache's confirmed height + /// 2. Block height to be is not the next confirm block height + fn handle_confirmed_block( + &mut self, + block_number: u64, + executed_block: ExecutedBlock, + receipts: Arc>>, + ) -> eyre::Result<()> { + if block_number != self.confirm_height + 1 { + return Err(eyre::eyre!( + "polluted state cache - not next consecutive target confirm height block" + )); + } + self.confirm_height = block_number; + self.confirm_cache.insert(block_number, executed_block, receipts)?; + info!( + target: "flashblocks", + confirm_height = self.confirm_height, + canonical_height = self.canon_info.0, + "Committed pending block to confirm flashblocks state cache", + ); + Ok(()) + } + + fn handle_pending_sequence( + &mut self, + pending_sequence: PendingSequence, + ) -> eyre::Result<()> { + let pending_height = pending_sequence.get_height(); + let expected_height = self.confirm_height + 1; + + if pending_height == expected_height { + let incoming_seq = pending_sequence.clone(); + if pending_sequence.is_target_flashblock() { + // Target flashblock. Promote to confirm, and clear pending state + self.handle_confirmed_block( + expected_height, + incoming_seq.pending.executed_block, + incoming_seq.pending.receipts, + )?; + self.pending_cache = None; + } else { + // In-progress — replace pending with newer flashblock + self.pending_cache = Some(incoming_seq); + } + } else if pending_height == expected_height + 1 { + // The next block's flashblock arrived. The target flashblocks was missed on + // the builder. Promote current pending to confirm, and set incoming as new + // pending sequence. 
+ let sequence = self.pending_cache.take().ok_or_else(|| { + eyre::eyre!( + "polluted state cache - trying to advance pending tip but no current pending" + ) + })?; + self.handle_confirmed_block( + expected_height, + sequence.pending.executed_block, + sequence.pending.receipts, + )?; + self.pending_cache = Some(pending_sequence.clone()); + } else { + return Err(eyre::eyre!( + "polluted state cache - not next consecutive pending height block" + )); + } + let _ = self.pending_sequence_tx.send(Some(pending_sequence)); + Ok(()) + } + + fn handle_canonical_block(&mut self, canon_info: (u64, B256), reorg: bool) -> bool { + // If the pending sequence matches the canonical block exactly, the target flashblocks + // was missed on the builder. Promote the current pending to confirm, and set the + // pending state to none. + if let Some(sequence) = self.pending_cache.as_ref() + && sequence.get_height() == canon_info.0 + && sequence.get_hash() == canon_info.1 + && canon_info.0 == self.confirm_height + 1 + { + let sequence = self.pending_cache.take().expect("just confirmed is_some"); + if let Err(e) = self.handle_confirmed_block( + canon_info.0, + sequence.pending.executed_block, + sequence.pending.receipts, + ) { + warn!(target: "flashblocks", err = %e, "Canonical block handle failed to promote pending sequence to confirm"); + } + } + + let pending_stale = + self.pending_cache.as_ref().is_some_and(|p| p.get_height() <= canon_info.0); + let hash_mismatch = self.confirm_cache.number_for_hash(&canon_info.1).is_none() + && self.confirm_cache.get_block_by_number(canon_info.0).is_some(); + let flush = pending_stale || hash_mismatch || reorg; + if flush { + warn!( + target: "flashblocks", + canonical_height = canon_info.0, + confirm_height = self.confirm_height, + canonical_reorg = reorg, + pending_stale, + hash_mismatch, + "Reorg or pending stale detected on handle canonical block", + ); + self.flush(); + } else { + debug!( + target: "flashblocks", + canonical_height = canon_info.0, 
+ confirm_height = self.confirm_height, + "Evicting flashblocks state inner cache" + ); + self.confirm_cache.flush_up_to_height(canon_info.0); + } + // Update state heights + self.canon_info = canon_info; + self.confirm_height = self.confirm_height.max(canon_info.0); + flush + } + + pub fn get_confirmed_block(&self) -> Option> { + self.get_block_by_number(self.confirm_height) + } + + pub fn get_pending_block(&self) -> Option> { + self.pending_cache.as_ref().map(|p| p.get_block_and_receipts()) + } + + pub fn get_canon_info(&self) -> (u64, B256) { + self.canon_info + } + + pub fn get_block_by_number(&self, num: u64) -> Option> { + if let Some(pending_sequence) = self.pending_cache.as_ref() + && pending_sequence.get_height() == num + { + return Some(pending_sequence.get_block_and_receipts()); + } + self.confirm_cache.get_block_by_number(num) + } + + pub fn get_block_by_hash(&self, hash: &B256) -> Option> { + if let Some(pending_sequence) = self.pending_cache.as_ref() + && pending_sequence.get_hash() == *hash + { + return Some(pending_sequence.get_block_and_receipts()); + } + self.confirm_cache.get_block_by_hash(hash) + } + + fn get_tx_info(&self, tx_hash: &TxHash) -> Option<(CachedTxInfo, BlockAndReceipts)> { + self.pending_cache + .as_ref() + .and_then(|p| p.get_tx_info(tx_hash)) + .or_else(|| self.confirm_cache.get_tx_info(tx_hash)) + } + + /// Returns the ordered vector of `ExecutedBlock`s from the cache. + /// + /// # Safety of the overlay + /// The returned blocks used for state overlay is correct **if and only if** the + /// blocks form a contiguous chain from some height down to `canonical_height + 1` + /// (or `canonical_height` itself in the redundant-but-safe race case). + /// + /// **Safe (redundant overlap)**: Due to a race between canonical commit and confirm + /// cache flush, the lowest overlay block may be equal to or lower than the canonical + /// height. + /// + /// For example, canonical is at height `x` and the overlay contains `[x+2, x+1, x]`. 
+ /// This is safe the overlay blocks are checked first (newest-to-oldest). The state + /// at height `x` contains changes identical to what canonical already applied, so + /// the result is correct regardless of which source resolves the query. + /// + /// **State inconsistency (gap in overlay)**: If an intermediate block is missing, + /// for example overlay has `[x+2, x]` but not `x+1`, then any account modified only + /// at height `x+1` would be invisible — the query falls through to canonical which + /// returns stale incorrect state. + /// + /// **State inconsistency (canonical too far behind)**: If the canonical height is + /// more than one block below the lowest overlay block. For example, canonical at + /// `x-2`, lowest overlay at `x`, then changes at height `x-1` are not covered by + /// either source. + /// + /// Both failure modes reduce to: every height between `canonical_height + 1` and + /// the target must be present in the overlay. This invariant is naturally maintained + /// by `handle_confirmed_block` (rejects non-consecutive heights). The pending block, + /// if present, sits at `confirm_height + 1`; it may be absent after a complete + /// sequence is promoted directly to the confirm cache via `target_index`. + /// + /// On validation failure (non-contiguous overlay or gap to canonical), the cache is + /// flushed and `None` is returned. 
+ fn get_executed_blocks_up_to_height( + &self, + target_height: u64, + ) -> eyre::Result>>> { + if self.confirm_height == 0 + || self.canon_info.0 == 0 + || target_height > self.confirm_height + 1 + || target_height <= self.canon_info.0 + { + // Cache not initialized or target height is outside the cache range + return Ok(None); + } + let mut blocks = Vec::new(); + if let Some(p) = self.pending_cache.as_ref() + && p.get_height() == target_height + { + blocks.push(p.pending.executed_block.clone()); + } + blocks.extend( + self.confirm_cache + .get_executed_blocks_up_to_height(target_height, self.canon_info.0)?, + ); + Ok(Some(blocks)) + } + + pub fn subscribe_pending_sequence(&self) -> PendingSequenceRx { + self.pending_sequence_rx.clone() + } +} diff --git a/crates/flashblocks/src/cache/pending.rs b/crates/flashblocks/src/cache/pending.rs new file mode 100644 index 00000000..619250e9 --- /dev/null +++ b/crates/flashblocks/src/cache/pending.rs @@ -0,0 +1,154 @@ +use crate::{cache::CachedTxInfo, execution::PrefixExecutionMeta}; +use derive_more::Deref; +use std::collections::HashMap; + +use alloy_consensus::BlockHeader; +use alloy_primitives::{TxHash, B256}; +use reth_primitives_traits::NodePrimitives; +use reth_primitives_traits::SealedHeader; +use reth_rpc_eth_types::{block::BlockAndReceipts, PendingBlock}; + +/// The pending flashblocks sequence built with all received `OpFlashblockPayload` +/// alongside the metadata for the last added flashblock. +#[derive(Debug, Clone, Deref)] +pub struct PendingSequence { + /// Locally built full pending block of the latest flashblocks sequence. + #[deref] + pub pending: PendingBlock, + /// Transaction index: tx hash → cached tx info for O(1) tx/receipt lookups. + pub tx_index: HashMap>, + /// The current block hash of the latest flashblocks sequence. + pub block_hash: B256, + /// Parent hash of the built block (may be non-canonical or canonical). 
+ pub parent_header: SealedHeader, + /// Prefix execution metadata for incremental builds. + pub prefix_execution_meta: PrefixExecutionMeta, + /// Target index of the latest flashblock in the sequence. + pub target_index: u64, +} + +impl PendingSequence { + pub fn get_hash(&self) -> B256 { + self.block_hash + } + + pub fn get_height(&self) -> u64 { + self.pending.block().number() + } + + pub fn get_block_and_receipts(&self) -> BlockAndReceipts { + self.pending.to_block_and_receipts() + } + + pub fn get_tx_info(&self, tx_hash: &TxHash) -> Option<(CachedTxInfo, BlockAndReceipts)> { + self.tx_index + .get(tx_hash) + .cloned() + .map(|tx_info| (tx_info, self.pending.to_block_and_receipts())) + } + + pub fn get_last_flashblock_index(&self) -> u64 { + self.prefix_execution_meta.last_flashblock_index + } + + pub fn is_target_flashblock(&self) -> bool { + self.target_index > 0 && self.get_last_flashblock_index() >= self.target_index + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::test_utils::{make_executed_block, mock_tx}; + use std::{collections::HashMap, time::Instant}; + + use alloy_consensus::Receipt; + use alloy_primitives::B256; + use op_alloy_consensus::OpReceipt; + + use reth_optimism_primitives::OpPrimitives; + use reth_rpc_eth_types::PendingBlock; + + fn make_pending_sequence(block_number: u64) -> PendingSequence { + let executed = make_executed_block(block_number, B256::ZERO); + let block_hash = executed.recovered_block.hash(); + let pending_block = PendingBlock::with_executed_block(Instant::now(), executed); + PendingSequence { + pending: pending_block, + tx_index: HashMap::new(), + block_hash, + parent_header: Default::default(), + prefix_execution_meta: Default::default(), + target_index: 0, + } + } + + fn make_pending_sequence_with_txs( + block_number: u64, + tx_count: usize, + ) -> PendingSequence { + let executed = make_executed_block(block_number, B256::ZERO); + let block_hash = executed.recovered_block.hash(); + let mut tx_index = 
HashMap::new(); + for i in 0..tx_count { + let tx = mock_tx(i as u64); + let tx_hash = tx.tx_hash(); + let receipt = OpReceipt::Eip7702(Receipt { + status: true.into(), + cumulative_gas_used: 21_000 * (i as u64 + 1), + logs: vec![], + }); + tx_index.insert( + tx_hash, + CachedTxInfo { block_number, block_hash, tx_index: i as u64, tx, receipt }, + ); + } + + let pending_block = PendingBlock::with_executed_block(Instant::now(), executed); + PendingSequence { + pending: pending_block, + tx_index, + block_hash, + parent_header: Default::default(), + prefix_execution_meta: Default::default(), + target_index: 0, + } + } + + #[test] + fn test_pending_sequence_get_hash_returns_stored_block_hash() { + let cache = make_pending_sequence(42); + assert_eq!(cache.get_hash(), cache.block_hash); + } + + #[test] + fn test_pending_sequence_get_height_returns_block_number() { + let cache = make_pending_sequence(99); + assert_eq!(cache.get_height(), 99); + } + + #[test] + fn test_pending_sequence_get_block_and_receipts_empty_receipts_on_no_tx_block() { + let cache = make_pending_sequence(3); + let bar = cache.get_block_and_receipts(); + assert!(bar.receipts.is_empty()); + } + + #[test] + fn test_pending_sequence_get_tx_info_returns_none_for_unknown_hash() { + let cache = make_pending_sequence_with_txs(10, 2); + assert!(cache.get_tx_info(&B256::repeat_byte(0xFF)).is_none()); + } + + #[test] + fn test_pending_sequence_get_tx_info_returns_correct_info_for_known_tx() { + let cache = make_pending_sequence_with_txs(42, 3); + let (tx_hash, expected_info) = cache.tx_index.iter().next().unwrap(); + let (info, bar) = cache.get_tx_info(tx_hash).expect("known tx hash should return Some"); + assert_eq!(info.block_number, 42); + assert_eq!(info.block_hash, cache.block_hash); + assert_eq!(info.tx_index, expected_info.tx_index); + assert_eq!(*info.tx.tx_hash(), *tx_hash); + assert_eq!(bar.block.number(), 42); + } +} diff --git a/crates/flashblocks/src/payload.rs b/crates/flashblocks/src/cache/raw.rs 
similarity index 54% rename from crates/flashblocks/src/payload.rs rename to crates/flashblocks/src/cache/raw.rs index 503e8409..68b9a080 100644 --- a/crates/flashblocks/src/payload.rs +++ b/crates/flashblocks/src/cache/raw.rs @@ -1,59 +1,621 @@ -use alloy_consensus::BlockHeader; -use alloy_primitives::B256; -use derive_more::Deref; -use reth_primitives_traits::NodePrimitives; -use reth_rpc_eth_types::PendingBlock; - -/// Type alias for the Optimism flashblock payload. -pub type FlashBlock = op_alloy_rpc_types_engine::OpFlashblockPayload; - -/// The pending block built with all received Flashblocks alongside the metadata for the last added -/// Flashblock. -#[derive(Debug, Clone, Deref)] -pub struct PendingFlashBlock { - /// The complete pending block built out of all received Flashblocks. - #[deref] - pub pending: PendingBlock, - /// Canonical anchor hash used for state lookups when this block was built. - /// - /// For canonical builds this equals `pending.block().parent_hash()`. - /// For speculative builds this points to the canonical ancestor used for storage reads. - pub canonical_anchor_hash: B256, - /// A sequential index that identifies the last Flashblock added to this block. - pub last_flashblock_index: u64, - /// The last Flashblock block hash, - pub last_flashblock_hash: B256, - /// Whether the [`PendingBlock`] has a properly computed stateroot. 
- pub has_computed_state_root: bool, +use crate::execution::BuildArgs; +use parking_lot::RwLock; +use ringbuffer::{AllocRingBuffer, RingBuffer}; +use std::{collections::BTreeMap, sync::Arc}; + +use alloy_eips::{eip2718::WithEncoded, eip4895::Withdrawal}; +use alloy_rpc_types_engine::PayloadId; +use op_alloy_rpc_types_engine::{OpFlashblockPayload, OpFlashblockPayloadBase}; +use reth_primitives_traits::{Recovered, SignedTransaction}; + +use xlayer_builder::broadcast::XLayerFlashblockPayload; + +const MAX_RAW_CACHE_SIZE: usize = 10; + +/// The raw flashblocks sequence cache for new incoming flashblocks from the sequencer. +/// The cache accumulates last two flashblocks sequences in memory, to handle scenario +/// when flashblocks received are out-of-order, and committing the previous sequence +/// state to the state cache is not yet possible due to parent hash mismatch (we still +/// need the previous flashblocks sequence to compute the state root). +/// +/// The raw cache is used to: +/// 1. Track the next best sequence to build, based on cache state (consecutive flashblocks +/// required) +/// 2. Re-org detection when a new flashblock is received +pub struct RawFlashblocksCache { + inner: Arc>>, } -impl PendingFlashBlock { - /// Create new pending flashblock. 
- pub const fn new( - pending: PendingBlock, - canonical_anchor_hash: B256, - last_flashblock_index: u64, - last_flashblock_hash: B256, - has_computed_state_root: bool, - ) -> Self { +impl RawFlashblocksCache { + pub fn new() -> Self { + let inner = Arc::new(RwLock::new(RawFlashblocksCacheInner::new())); + Self { inner } + } + + pub fn handle_canonical_height(&self, height: u64) { + self.inner.write().handle_canonical_height(height); + } + + pub fn handle_flashblock(&self, payload: XLayerFlashblockPayload) -> eyre::Result<()> { + self.inner.write().handle_flashblock(payload) + } + + pub(crate) fn try_get_buildable_args( + &self, + height: u64, + ) -> Option>>>> { + self.inner.read().try_get_buildable_args(height) + } +} + +#[derive(Debug, Clone)] +pub struct RawFlashblocksCacheInner { + cache: AllocRingBuffer>, + canon_height: u64, +} + +impl RawFlashblocksCacheInner { + fn new() -> Self { + Self { cache: AllocRingBuffer::new(MAX_RAW_CACHE_SIZE), canon_height: 0 } + } + + pub fn handle_canonical_height(&mut self, height: u64) { + self.canon_height = height; + // Evict all entries whose height is at or below the new canonical height. + let retained: Vec<_> = self + .cache + .drain() + .filter(|entry| entry.block_number().is_none_or(|n| n > height)) + .collect(); + for entry in retained { + self.cache.enqueue(entry); + } + } + + pub fn handle_flashblock(&mut self, payload: XLayerFlashblockPayload) -> eyre::Result<()> { + let XLayerFlashblockPayload { inner: flashblock, target_index } = payload; + let incoming_height = flashblock.block_number(); + if incoming_height <= self.canon_height { + return Err(eyre::eyre!( + "Received old flashblock behind canonical height, skip adding to raw cache: flashblock_number={}, canon_height={}", + incoming_height, + self.canon_height, + )); + } + + // Search for an existing entry matching this payload_id. 
+ let existing = + self.cache.iter_mut().find(|entry| entry.payload_id() == Some(flashblock.payload_id)); + + if let Some(entry) = existing { + entry.insert_flashblock(flashblock, target_index)?; + } else { + // New sequence — push to ring buffer, evicting the oldest entry + // when the cache is full. + let mut entry = RawFlashblocksEntry::new(); + entry.insert_flashblock(flashblock, target_index)?; + self.cache.enqueue(entry); + } + Ok(()) + } + + fn try_get_buildable_args( + &self, + height: u64, + ) -> Option>>>> { + // Iterate newest-first so that the most recent entry is always picked first + // (same height, different payload_id). + self.cache + .iter() + .rev() + .find(|entry| entry.block_number() == Some(height)) + .and_then(|entry| entry.try_to_buildable_args()) + } +} + +/// Raw flashblocks sequence keeps track of the flashblocks sequence based on their +/// `payload_id`. +#[derive(Debug, Clone)] +struct RawFlashblocksEntry { + /// Tracks the individual flashblocks in order + payloads: BTreeMap, + /// Tracks the recovered transactions by index + recovered_transactions_by_index: BTreeMap>>>, + /// Tracks if the accumulated sequence has received the first base flashblock + has_base: bool, + /// The sequencer's target flashblock index. Zero if unset. + target_index: u64, +} + +impl RawFlashblocksEntry { + fn new() -> Self { Self { - pending, - canonical_anchor_hash, - last_flashblock_index, - last_flashblock_hash, - has_computed_state_root, + payloads: BTreeMap::new(), + recovered_transactions_by_index: BTreeMap::new(), + has_base: false, + target_index: 0, } } - /// Returns the properly calculated state root for that block if it was computed. - pub fn computed_state_root(&self) -> Option { - self.has_computed_state_root.then_some(self.pending.block().state_root()) + /// Inserts a flashblock into the sequence. 
+ fn insert_flashblock( + &mut self, + flashblock: OpFlashblockPayload, + target_index: u64, + ) -> eyre::Result<()> { + if !self.can_accept(&flashblock) { + return Err(eyre::eyre!( + "Incoming flashblock failed to be accepted into the sequence, possible re-org detected: incoming_id={:?}, current_id={:?}, incoming_height={}, current_height={:?}", + flashblock.payload_id, + self.payload_id(), + flashblock.block_number(), + self.block_number(), + )); + } + + if flashblock.index == 0 { + self.has_base = true; + } + if target_index > 0 { + self.target_index = target_index; + } + let flashblock_index = flashblock.index; + let recovered_txs = flashblock.recover_transactions().collect::, _>>()?; + self.payloads.insert(flashblock_index, flashblock); + self.recovered_transactions_by_index.insert(flashblock_index, recovered_txs); + Ok(()) + } + + /// Returns whether this flashblock would be accepted into the current sequence. + fn can_accept(&self, flashblock: &OpFlashblockPayload) -> bool { + if self.payloads.is_empty() { + return true; + } + self.block_number() == Some(flashblock.block_number()) + && self.payload_id() == Some(flashblock.payload_id) + && !self.payloads.contains_key(&flashblock.index) + } + + fn try_get_best_revision(&self) -> Option { + if !self.has_base || self.payloads.is_empty() { + return None; + } + let mut new_revision = 0; + for (index, _) in self.payloads.iter() { + if *index == 0 { + continue; + } + // If the index is not consecutive, break the loop + if new_revision != *index - 1 { + break; + } + new_revision = *index; + } + Some(new_revision) + } + + pub fn block_number(&self) -> Option { + Some(self.payloads.values().next()?.block_number()) + } + + pub fn payload_id(&self) -> Option { + Some(self.payloads.values().next()?.payload_id) + } + + fn base(&self) -> Option<&OpFlashblockPayloadBase> { + self.payloads.get(&0)?.base.as_ref() + } + + fn withdrawals_at(&self, index: u64) -> Vec { + // Per the OP Stack flashblocks spec, each diff's 
`withdrawals` field is cumulative + // (the complete list for the entire block), not incremental + self.payloads.get(&index).map(|p| p.diff.withdrawals.clone()).unwrap_or_default() + } + + fn transactions_up_to(&self, up_to: u64) -> Vec>> { + self.recovered_transactions_by_index + .range(..=up_to) + .flat_map(|(_, txs)| txs.iter().cloned()) + .collect() + } + + fn try_to_buildable_args(&self) -> Option>>>> { + let best_revision = self.try_get_best_revision()?; + Some(BuildArgs { + base: self.base()?.clone(), + payload_id: self.payload_id()?, + transactions: self.transactions_up_to(best_revision), + withdrawals: self.withdrawals_at(best_revision), + last_flashblock_index: best_revision, + target_index: self.target_index, + }) + } + + #[cfg(test)] + fn transaction_count(&self) -> usize { + self.recovered_transactions_by_index.values().map(Vec::len).sum() } } #[cfg(test)] mod tests { use super::*; + use crate::test_utils::TestFlashBlockFactory; + use reth_optimism_primitives::OpTransactionSigned; + + type TestRawCache = RawFlashblocksCacheInner; + + /// Wraps an [`OpFlashblockPayload`] into an [`XLayerFlashblockPayload`] with + /// `target_index: 0` for tests that don't care about the target count. 
+ fn wrap(fb: OpFlashblockPayload) -> XLayerFlashblockPayload { + XLayerFlashblockPayload::new(fb, 0) + } + + #[test] + fn test_raw_entry_can_accept_first_flashblock_on_empty_entry() { + // Arrange + let factory = TestFlashBlockFactory::new(); + let fb0 = factory.flashblock_at(0).build(); + let mut cache = TestRawCache::new(); + + // Act + let result = cache.handle_flashblock(wrap(fb0)); + + // Assert: empty entry accepts anything without error + assert!(result.is_ok(), "empty entry should accept first flashblock"); + assert_eq!(cache.cache.len(), 1); + } + + #[test] + fn test_raw_entry_rejects_duplicate_index_in_same_sequence() { + let factory = TestFlashBlockFactory::new(); + let fb0 = factory.flashblock_at(0).build(); + let fb0_dup = factory.flashblock_at(0).build(); + let mut cache = TestRawCache::new(); + + cache.handle_flashblock(wrap(fb0)).expect("first flashblock should succeed"); + let result = cache.handle_flashblock(wrap(fb0_dup)); + assert!(result.is_err(), "duplicate index within same sequence should be rejected"); + } + + #[test] + fn test_raw_entry_rejects_mismatched_block_number() { + let factory = TestFlashBlockFactory::new(); + let fb0 = factory.flashblock_at(0).build(); + let payload_id = fb0.payload_id; + let mut cache = TestRawCache::new(); + + cache.handle_flashblock(wrap(fb0)).expect("first flashblock should succeed"); + let fb_wrong_block = factory + .builder() + .index(1) + .block_number(999) // different block number + .payload_id(payload_id) + .build(); + let result = cache.handle_flashblock(wrap(fb_wrong_block)); + assert!(result.is_err(), "mismatched block number with same payload_id should be rejected"); + assert_eq!(cache.cache.len(), 1, "rejected flashblock should not create a new entry"); + } + + #[test] + fn test_raw_entry_accepts_out_of_order_flashblocks_within_same_sequence() { + let factory = TestFlashBlockFactory::new(); + let fb0 = factory.flashblock_at(0).build(); + let payload_id = fb0.payload_id; + let fb2 = 
factory.builder().index(2).block_number(100).payload_id(payload_id).build(); + let mut cache = TestRawCache::new(); + + cache.handle_flashblock(wrap(fb0)).expect("fb0 insert"); + let result = cache.handle_flashblock(wrap(fb2)); + assert!(result.is_ok(), "out-of-order unique index should be accepted"); + } + + #[test] + fn test_raw_entry_get_best_revision_returns_none_without_base() { + let factory = TestFlashBlockFactory::new(); + let fb1 = factory.builder().index(1).block_number(100).build(); + let mut cache = TestRawCache::new(); + + cache.handle_flashblock(wrap(fb1)).expect("fb1 insert"); + let entry = cache.cache.iter().next().expect("entry should exist"); + let best = entry.try_get_best_revision(); + assert!(best.is_none(), "get_best_revision should return None without base (index 0)"); + } + + #[test] + fn test_raw_entry_get_best_revision_returns_zero_with_only_base() { + let factory = TestFlashBlockFactory::new(); + let fb0 = factory.flashblock_at(0).build(); + + let mut cache = TestRawCache::new(); + cache.handle_flashblock(wrap(fb0)).expect("fb0 insert"); + let entry = cache.cache.iter().next().expect("entry should exist"); + let best = entry.try_get_best_revision(); + assert_eq!(best, Some(0), "only index 0 → best revision is 0"); + } + + #[test] + fn test_raw_entry_get_best_revision_with_consecutive_sequence() { + let factory = TestFlashBlockFactory::new(); + let fb0 = factory.flashblock_at(0).build(); + let fb1 = factory.flashblock_after(&fb0).build(); + let fb2 = factory.flashblock_after(&fb1).build(); + let fb3 = factory.flashblock_after(&fb2).build(); + let mut cache = TestRawCache::new(); + + cache.handle_flashblock(wrap(fb0)).expect("fb0"); + cache.handle_flashblock(wrap(fb1)).expect("fb1"); + cache.handle_flashblock(wrap(fb2)).expect("fb2"); + cache.handle_flashblock(wrap(fb3)).expect("fb3"); + let entry = cache.cache.iter().next().expect("entry should exist"); + let best = entry.try_get_best_revision(); + assert_eq!(best, Some(3), "consecutive 
0..3 → best revision 3"); + } + + #[test] + fn test_raw_entry_get_best_revision_stops_at_gap() { + let factory = TestFlashBlockFactory::new(); + let fb0 = factory.flashblock_at(0).build(); + let payload_id = fb0.payload_id; + let fb1 = factory.flashblock_after(&fb0).build(); + let fb3 = factory.builder().index(3).block_number(100).payload_id(payload_id).build(); + let mut cache = TestRawCache::new(); + + cache.handle_flashblock(wrap(fb0)).expect("fb0"); + cache.handle_flashblock(wrap(fb1)).expect("fb1"); + cache.handle_flashblock(wrap(fb3)).expect("fb3 (gap after index 1)"); + let entry = cache.cache.iter().next().expect("entry should exist"); + let best = entry.try_get_best_revision(); + assert_eq!(best, Some(1), "gap between 1 and 3 → best revision is 1"); + } + + #[test] + fn test_raw_cache_handle_canonical_height_evicts_entries_at_or_below_height() { + let factory = TestFlashBlockFactory::new(); + let fb100 = factory.flashblock_at(0).build(); + let fb101 = factory.flashblock_for_next_block(&fb100).build(); + let mut cache = TestRawCache::new(); + + cache.handle_flashblock(wrap(fb100)).expect("fb100"); + cache.handle_flashblock(wrap(fb101)).expect("fb101"); + assert_eq!(cache.cache.len(), 2); + cache.handle_canonical_height(100); + assert_eq!(cache.cache.len(), 1, "block 100 entry should be evicted"); + let remaining = cache.cache.iter().next().expect("one entry should remain"); + assert_eq!(remaining.block_number(), Some(101), "remaining entry should be for block 101"); + } + + #[test] + fn test_raw_cache_handle_canonical_height_evicts_multiple_entries() { + // Arrange: insert flashblocks for blocks 100, 101, 102 + let factory = TestFlashBlockFactory::new(); + let fb100 = factory.flashblock_at(0).build(); + let fb101 = factory.flashblock_for_next_block(&fb100).build(); + let fb102 = factory.flashblock_for_next_block(&fb101).build(); + let mut cache = TestRawCache::new(); + + cache.handle_flashblock(wrap(fb100)).expect("fb100"); + 
cache.handle_flashblock(wrap(fb101)).expect("fb101"); + cache.handle_flashblock(wrap(fb102)).expect("fb102"); + assert_eq!(cache.cache.len(), 3); + cache.handle_canonical_height(102); + assert_eq!(cache.cache.len(), 0, "all entries at or below height 102 should be evicted"); + } + + #[test] + fn test_raw_cache_handle_canonical_height_keeps_entries_above_height() { + let factory = TestFlashBlockFactory::new(); + let fb100 = factory.flashblock_at(0).build(); + let fb101 = factory.flashblock_for_next_block(&fb100).build(); + let mut cache = TestRawCache::new(); + + cache.handle_flashblock(wrap(fb100)).expect("fb100"); + cache.handle_flashblock(wrap(fb101)).expect("fb101"); + cache.handle_canonical_height(99); + assert_eq!(cache.cache.len(), 2, "no entries should be evicted below their block numbers"); + } + + #[test] + fn test_raw_cache_rejects_flashblock_at_or_below_canonical_height() { + let factory = TestFlashBlockFactory::new(); + let fb100 = factory.flashblock_at(0).build(); + let mut cache = TestRawCache::new(); + + cache.handle_canonical_height(100); + let result = cache.handle_flashblock(wrap(fb100)); + assert!(result.is_err(), "flashblock at canonical height should be rejected"); + assert_eq!(cache.cache.len(), 0, "flashblock at canonical height should not be inserted"); + } + + #[test] + fn test_raw_cache_groups_flashblocks_by_payload_id() { + let factory = TestFlashBlockFactory::new(); + let fb0_seq1 = factory.flashblock_at(0).build(); + let fb0_seq2 = factory.flashblock_for_next_block(&fb0_seq1).build(); + let mut cache = TestRawCache::new(); + + cache.handle_flashblock(wrap(fb0_seq1.clone())).expect("seq1 fb0"); + cache.handle_flashblock(wrap(fb0_seq2.clone())).expect("seq2 fb0"); + let fb1_seq1 = factory.flashblock_after(&fb0_seq1).build(); + cache.handle_flashblock(wrap(fb1_seq1)).expect("seq1 fb1"); + let entries: Vec<_> = cache.cache.iter().collect(); + assert_eq!(entries.len(), 2, "should have two separate entries for two payload_ids"); + } + + 
#[test] + fn test_raw_cache_ring_buffer_evicts_oldest_entry_when_full() { + let factory = TestFlashBlockFactory::new(); + let mut prev_fb = factory.flashblock_at(0).build(); + let first_block_num = prev_fb.metadata.block_number; + let mut cache = TestRawCache::new(); + cache.handle_flashblock(wrap(prev_fb.clone())).expect("first fb"); + + // Fill up to MAX_RAW_CACHE_SIZE (10) unique sequences + for _ in 1..MAX_RAW_CACHE_SIZE { + let next_fb = factory.flashblock_for_next_block(&prev_fb).build(); + cache.handle_flashblock(wrap(next_fb.clone())).expect("fill fb"); + prev_fb = next_fb; + } + assert_eq!(cache.cache.len(), MAX_RAW_CACHE_SIZE, "cache should be at max capacity"); + + // Insert one more sequence to trigger FIFO eviction + let overflow_fb = factory.flashblock_for_next_block(&prev_fb).build(); + let overflow_block_num = overflow_fb.metadata.block_number; + cache.handle_flashblock(wrap(overflow_fb)).expect("overflow fb"); + + // Assert: cache is still at max size (oldest entry evicted) + assert_eq!(cache.cache.len(), MAX_RAW_CACHE_SIZE, "cache size should remain at max"); + // The oldest entry (first_block_num) should have been evicted + let has_first = cache.cache.iter().any(|e| e.block_number() == Some(first_block_num)); + let has_overflow = cache.cache.iter().any(|e| e.block_number() == Some(overflow_block_num)); + assert!(!has_first, "oldest entry should have been evicted"); + assert!(has_overflow, "newest entry should be present"); + } + + #[test] + fn test_raw_entry_block_number_returns_none_on_empty() { + let entry = RawFlashblocksEntry::::new(); + assert!(entry.block_number().is_none()); + } + + #[test] + fn test_raw_entry_block_number_returns_correct_value_after_insert() { + let factory = TestFlashBlockFactory::new(); + let fb0 = factory.flashblock_at(0).build(); + let expected_block = fb0.metadata.block_number; + let mut cache = TestRawCache::new(); + + cache.handle_flashblock(wrap(fb0)).expect("fb0 insert"); + let entry = 
cache.cache.iter().next().expect("entry should exist"); + assert_eq!(entry.block_number(), Some(expected_block)); + } + + #[test] + fn test_raw_entry_transaction_count_is_zero_on_empty_flashblock() { + let factory = TestFlashBlockFactory::new(); + let fb0 = factory.flashblock_at(0).build(); // no transactions set + let mut cache = TestRawCache::new(); + + cache.handle_flashblock(wrap(fb0)).expect("fb0 insert"); + let entry = cache.cache.iter().next().expect("entry should exist"); + assert_eq!(entry.transaction_count(), 0, "flashblock with no txs should have count 0"); + } + + #[test] + fn test_raw_entry_has_base_set_after_inserting_index_zero() { + let factory = TestFlashBlockFactory::new(); + let fb0 = factory.flashblock_at(0).build(); + let mut cache = TestRawCache::new(); + + cache.handle_flashblock(wrap(fb0)).expect("fb0 insert"); + let entry = cache.cache.iter().next().expect("entry should exist"); + assert!(entry.has_base, "has_base should be true after inserting index 0"); + } + + #[test] + fn test_raw_entry_has_base_not_set_when_only_non_zero_index_inserted() { + let factory = TestFlashBlockFactory::new(); + let fb1 = factory.builder().index(1).block_number(100).build(); + let mut cache = TestRawCache::new(); + + cache.handle_flashblock(wrap(fb1)).expect("fb1 insert"); + let entry = cache.cache.iter().next().expect("entry should exist"); + assert!(!entry.has_base, "has_base should be false when only index 1 inserted"); + } + + #[test] + fn test_raw_flashblocks_cache_handle_flashblock_inserts_via_arc_rwlock() { + let factory = TestFlashBlockFactory::new(); + let fb0 = factory.flashblock_at(0).build(); + let cache = RawFlashblocksCache::::new(); + + let result = cache.handle_flashblock(wrap(fb0)); + assert!(result.is_ok(), "handle_flashblock via Arc wrapper should succeed"); + } + + #[test] + fn test_raw_entry_get_best_revision_with_only_index_one_no_base() { + let factory = TestFlashBlockFactory::new(); + let fb1 = 
factory.builder().index(1).block_number(100).build(); + + let mut cache = TestRawCache::new(); + cache.handle_flashblock(wrap(fb1)).expect("fb1 insert"); + let entry = cache.cache.iter().next().expect("entry should exist"); + let best = entry.try_get_best_revision(); + // Assert: no base → None, even though index 1 exists + assert!(best.is_none(), "no base means get_best_revision must return None"); + } + + #[test] + fn test_raw_entry_get_best_revision_gap_immediately_after_base() { + // Arrange: only index 0 and index 2, no index 1 + let factory = TestFlashBlockFactory::new(); + let fb0 = factory.flashblock_at(0).build(); + let payload_id = fb0.payload_id; + let block_number = fb0.metadata.block_number; + let fb2 = + factory.builder().index(2).block_number(block_number).payload_id(payload_id).build(); + let mut cache = TestRawCache::new(); + + cache.handle_flashblock(wrap(fb0)).expect("fb0"); + cache.handle_flashblock(wrap(fb2)).expect("fb2"); + let entry = cache.cache.iter().next().expect("entry should exist"); + let best = entry.try_get_best_revision(); + // Assert: gap immediately after base (index 1 missing) → best revision is 0 + assert_eq!(best, Some(0), "gap at index 1 means best revision stays at 0"); + } + + // --- can_accept edge cases --- + + #[test] + fn test_raw_entry_can_accept_rejects_mismatched_payload_id_with_same_block_number() { + // Arrange: insert fb with payload_id A at block 100 + let factory = TestFlashBlockFactory::new(); + let fb0 = factory.flashblock_at(0).build(); + let different_payload_id = alloy_rpc_types_engine::PayloadId::new([ + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + ]); + let mut cache = TestRawCache::new(); + + cache.handle_flashblock(wrap(fb0.clone())).expect("fb0 insert"); + let fb_diff = factory + .builder() + .index(0) + .block_number(fb0.metadata.block_number) + .payload_id(different_payload_id) + .build(); + let result = cache.handle_flashblock(wrap(fb_diff)); + // Assert: new entry created (no error), but we 
now have 2 entries + assert!(result.is_ok(), "different payload_id with same block creates new entry"); + assert_eq!( + cache.cache.len(), + 2, + "different payload_id should produce a second cache entry" + ); + } + + #[test] + fn test_raw_cache_accumulates_flashblocks_into_single_entry_for_same_payload_id() { + let factory = TestFlashBlockFactory::new(); + let fb0 = factory.flashblock_at(0).build(); + let fb1 = factory.flashblock_after(&fb0).build(); + let fb2 = factory.flashblock_after(&fb1).build(); + let fb3 = factory.flashblock_after(&fb2).build(); + let mut cache = TestRawCache::new(); + + cache.handle_flashblock(wrap(fb0)).expect("fb0"); + cache.handle_flashblock(wrap(fb1)).expect("fb1"); + cache.handle_flashblock(wrap(fb2)).expect("fb2"); + cache.handle_flashblock(wrap(fb3)).expect("fb3"); + // Assert: all four go into a single entry (same payload_id) + assert_eq!( + cache.cache.len(), + 1, + "all flashblocks with the same payload_id should accumulate into one entry" + ); + let entry = cache.cache.iter().next().expect("entry should exist"); + assert_eq!(entry.payloads.len(), 4, "entry should contain 4 payloads"); + } #[test] fn test_flashblock_serde_roundtrip() { @@ -246,9 +808,9 @@ mod tests { "payload_id": "0x0316ecb1aa1671b5" }"#; - let flashblock: FlashBlock = serde_json::from_str(raw).expect("deserialize"); + let flashblock: OpFlashblockPayload = serde_json::from_str(raw).expect("deserialize"); let serialized = serde_json::to_string(&flashblock).expect("serialize"); - let roundtrip: FlashBlock = serde_json::from_str(&serialized).expect("roundtrip"); + let roundtrip: OpFlashblockPayload = serde_json::from_str(&serialized).expect("roundtrip"); assert_eq!(flashblock, roundtrip); } diff --git a/crates/flashblocks/src/cache/utils.rs b/crates/flashblocks/src/cache/utils.rs new file mode 100644 index 00000000..d4652a37 --- /dev/null +++ b/crates/flashblocks/src/cache/utils.rs @@ -0,0 +1,46 @@ +#[cfg(test)] +mod tests { + use std::sync::Arc; + + use 
alloy_consensus::{BlockHeader, Header}; + use alloy_primitives::B256; + use reth_optimism_primitives::{OpBlock, OpPrimitives}; + use reth_primitives_traits::{Block, BlockTy, NodePrimitives}; + use reth_primitives_traits::{RecoveredBlock, SealedBlock, SealedHeader}; + use reth_rpc_eth_types::block::BlockAndReceipts; + + pub(crate) fn block_from_bar(bar: &BlockAndReceipts) -> BlockTy { + BlockTy::::new(bar.block.header().clone(), bar.block.body().clone()) + } + + fn make_block_and_receipts( + block_number: u64, + parent_hash: B256, + ) -> BlockAndReceipts { + let header = Header { number: block_number, parent_hash, ..Default::default() }; + let sealed_header = SealedHeader::seal_slow(header); + let block = OpBlock::new(sealed_header.unseal(), Default::default()); + let sealed_block = SealedBlock::seal_slow(block); + let recovered_block = RecoveredBlock::new_sealed(sealed_block, vec![]); + BlockAndReceipts { block: Arc::new(recovered_block), receipts: Arc::new(vec![]) } + } + + #[test] + fn test_block_from_bar_returns_block_with_correct_number() { + let bar = make_block_and_receipts(42, B256::ZERO); + let block = block_from_bar::(&bar); + assert_eq!(block.header().number(), 42, "block_from_bar should preserve the block number"); + } + + #[test] + fn test_block_from_bar_returns_block_with_correct_parent_hash() { + let parent = B256::repeat_byte(0xBE); + let bar = make_block_and_receipts(10, parent); + let block = block_from_bar::(&bar); + assert_eq!( + block.header().parent_hash(), + parent, + "block_from_bar should preserve the parent hash" + ); + } +} diff --git a/crates/flashblocks/src/consensus.rs b/crates/flashblocks/src/consensus.rs deleted file mode 100644 index 453d9bff..00000000 --- a/crates/flashblocks/src/consensus.rs +++ /dev/null @@ -1,458 +0,0 @@ -use crate::{FlashBlockCompleteSequence, FlashBlockCompleteSequenceRx}; -use alloy_primitives::B256; -use alloy_rpc_types_engine::PayloadStatusEnum; -use op_alloy_rpc_types_engine::OpExecutionData; -use 
reth_engine_primitives::ConsensusEngineHandle; -use reth_optimism_payload_builder::OpPayloadTypes; -use reth_payload_primitives::{EngineApiMessageVersion, ExecutionPayload, PayloadTypes}; -use tracing::*; - -/// Consensus client that sends FCUs and new payloads using blocks from a [`FlashBlockService`]. -/// -/// This client receives completed flashblock sequences and: -/// - Attempts to submit `engine_newPayload` if `state_root` is available (non-zero) -/// - Always sends `engine_forkChoiceUpdated` to drive chain forward -/// -/// [`FlashBlockService`]: crate::FlashBlockService -#[derive(Debug)] -pub struct FlashBlockConsensusClient

-where - P: PayloadTypes, -{ - /// Handle to execution client. - engine_handle: ConsensusEngineHandle

, - /// Receiver for completed flashblock sequences from `FlashBlockService`. - sequence_receiver: FlashBlockCompleteSequenceRx, -} - -impl

FlashBlockConsensusClient

-where - P: PayloadTypes, - P::ExecutionData: for<'a> TryFrom<&'a FlashBlockCompleteSequence, Error: std::fmt::Display>, -{ - /// Create a new `FlashBlockConsensusClient` with the given Op engine and sequence receiver. - pub const fn new( - engine_handle: ConsensusEngineHandle

, - sequence_receiver: FlashBlockCompleteSequenceRx, - ) -> eyre::Result { - Ok(Self { engine_handle, sequence_receiver }) - } - - /// Attempts to submit a new payload to the engine. - /// - /// The `TryFrom` conversion will fail if `execution_outcome.state_root` is `B256::ZERO`, - /// in which case this method uses the `parent_hash` instead to drive the chain forward. - /// - /// Returns the block hash to use for FCU (either the new block's hash or the parent hash). - async fn submit_new_payload(&self, sequence: &FlashBlockCompleteSequence) -> B256 { - let payload = match P::ExecutionData::try_from(sequence) { - Ok(payload) => payload, - Err(err) => { - trace!(target: "flashblocks", %err, "Failed payload conversion, using parent hash"); - return sequence.payload_base().parent_hash; - } - }; - - let block_number = payload.block_number(); - let block_hash = payload.block_hash(); - match self.engine_handle.new_payload(payload).await { - Ok(result) => { - debug!( - target: "flashblocks", - flashblock_count = sequence.count(), - block_number, - %block_hash, - ?result, - "Submitted engine_newPayload", - ); - - if let PayloadStatusEnum::Invalid { validation_error } = result.status { - debug!( - target: "flashblocks", - block_number, - %block_hash, - %validation_error, - "Payload validation error", - ); - }; - } - Err(err) => { - error!( - target: "flashblocks", - %err, - block_number, - "Failed to submit new payload", - ); - } - } - - block_hash - } - - /// Submit a forkchoice update to the engine. 
- async fn submit_forkchoice_update( - &self, - head_block_hash: B256, - sequence: &FlashBlockCompleteSequence, - ) { - let block_number = sequence.block_number(); - let safe_hash = sequence.payload_base().parent_hash; - let finalized_hash = sequence.payload_base().parent_hash; - let fcu_state = alloy_rpc_types_engine::ForkchoiceState { - head_block_hash, - safe_block_hash: safe_hash, - finalized_block_hash: finalized_hash, - }; - - match self - .engine_handle - .fork_choice_updated(fcu_state, None, EngineApiMessageVersion::V5) - .await - { - Ok(result) => { - debug!( - target: "flashblocks", - flashblock_count = sequence.count(), - block_number, - %head_block_hash, - %safe_hash, - %finalized_hash, - ?result, - "Submitted engine_forkChoiceUpdated", - ) - } - Err(err) => { - error!( - target: "flashblocks", - %err, - block_number, - %head_block_hash, - %safe_hash, - %finalized_hash, - "Failed to submit fork choice update", - ); - } - } - } - - /// Runs the consensus client loop. - /// - /// Continuously receives completed flashblock sequences and submits them to the execution - /// engine: - /// 1. Attempts `engine_newPayload` (only if `state_root` is available) - /// 2. 
Always sends `engine_forkChoiceUpdated` to drive chain forward - pub async fn run(mut self) { - loop { - let Ok(sequence) = self.sequence_receiver.recv().await else { - continue; - }; - - // Returns block_hash for FCU: - // - If state_root is available: submits newPayload and returns the new block's hash - // - If state_root is zero: skips newPayload and returns parent_hash (no progress yet) - let block_hash = self.submit_new_payload(&sequence).await; - - self.submit_forkchoice_update(block_hash, &sequence).await; - } - } -} - -impl TryFrom<&FlashBlockCompleteSequence> for OpExecutionData { - type Error = &'static str; - - fn try_from(sequence: &FlashBlockCompleteSequence) -> Result { - let mut data = Self::from_flashblocks_unchecked(sequence); - - // If execution outcome is available, use the computed state_root and block_hash. - // FlashBlockService computes these when building sequences on top of the local tip. - if let Some(execution_outcome) = sequence.execution_outcome() { - let payload = data.payload.as_v1_mut(); - payload.state_root = execution_outcome.state_root; - payload.block_hash = execution_outcome.block_hash; - } - - // Only proceed if we have a valid state_root (non-zero). 
- if data.payload.as_v1_mut().state_root == B256::ZERO { - return Err("No state_root available for payload"); - } - - Ok(data) - } -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::{sequence::SequenceExecutionOutcome, test_utils::TestFlashBlockFactory}; - - mod op_execution_data_conversion { - use super::*; - - #[test] - fn test_try_from_fails_with_zero_state_root() { - // When execution_outcome is None, state_root remains zero and conversion fails - let factory = TestFlashBlockFactory::new(); - let fb0 = factory.flashblock_at(0).state_root(B256::ZERO).build(); - - let sequence = FlashBlockCompleteSequence::new(vec![fb0], None).unwrap(); - - let result = OpExecutionData::try_from(&sequence); - assert!(result.is_err()); - assert_eq!(result.unwrap_err(), "No state_root available for payload"); - } - - #[test] - fn test_try_from_succeeds_with_execution_outcome() { - // When execution_outcome has state_root, conversion succeeds - let factory = TestFlashBlockFactory::new(); - let fb0 = factory.flashblock_at(0).state_root(B256::ZERO).build(); - - let execution_outcome = SequenceExecutionOutcome { - block_hash: B256::random(), - state_root: B256::random(), // Non-zero - }; - - let sequence = - FlashBlockCompleteSequence::new(vec![fb0], Some(execution_outcome)).unwrap(); - - let result = OpExecutionData::try_from(&sequence); - assert!(result.is_ok()); - - let mut data = result.unwrap(); - assert_eq!(data.payload.as_v1_mut().state_root, execution_outcome.state_root); - assert_eq!(data.payload.as_v1_mut().block_hash, execution_outcome.block_hash); - } - - #[test] - fn test_try_from_succeeds_with_provided_state_root() { - // When sequencer provides non-zero state_root, conversion succeeds - let factory = TestFlashBlockFactory::new(); - let provided_state_root = B256::random(); - let fb0 = factory.flashblock_at(0).state_root(provided_state_root).build(); - - let sequence = FlashBlockCompleteSequence::new(vec![fb0], None).unwrap(); - - let result = 
OpExecutionData::try_from(&sequence); - assert!(result.is_ok()); - - let mut data = result.unwrap(); - assert_eq!(data.payload.as_v1_mut().state_root, provided_state_root); - } - - #[test] - fn test_try_from_execution_outcome_overrides_provided_state_root() { - // execution_outcome takes precedence over sequencer-provided state_root - let factory = TestFlashBlockFactory::new(); - let provided_state_root = B256::random(); - let fb0 = factory.flashblock_at(0).state_root(provided_state_root).build(); - - let execution_outcome = SequenceExecutionOutcome { - block_hash: B256::random(), - state_root: B256::random(), // Different from provided - }; - - let sequence = - FlashBlockCompleteSequence::new(vec![fb0], Some(execution_outcome)).unwrap(); - - let result = OpExecutionData::try_from(&sequence); - assert!(result.is_ok()); - - let mut data = result.unwrap(); - // Should use execution_outcome, not the provided state_root - assert_eq!(data.payload.as_v1_mut().state_root, execution_outcome.state_root); - assert_ne!(data.payload.as_v1_mut().state_root, provided_state_root); - } - - #[test] - fn test_try_from_with_multiple_flashblocks() { - // Test conversion with sequence of multiple flashblocks - let factory = TestFlashBlockFactory::new(); - let fb0 = factory.flashblock_at(0).state_root(B256::ZERO).build(); - let fb1 = factory.flashblock_after(&fb0).state_root(B256::ZERO).build(); - let fb2 = factory.flashblock_after(&fb1).state_root(B256::ZERO).build(); - - let execution_outcome = - SequenceExecutionOutcome { block_hash: B256::random(), state_root: B256::random() }; - - let sequence = - FlashBlockCompleteSequence::new(vec![fb0, fb1, fb2], Some(execution_outcome)) - .unwrap(); - - let result = OpExecutionData::try_from(&sequence); - assert!(result.is_ok()); - - let mut data = result.unwrap(); - assert_eq!(data.payload.as_v1_mut().state_root, execution_outcome.state_root); - assert_eq!(data.payload.as_v1_mut().block_hash, execution_outcome.block_hash); - } - } - - mod 
consensus_client_creation { - use super::*; - use tokio::sync::broadcast; - - #[test] - fn test_new_creates_client() { - let (engine_tx, _) = tokio::sync::mpsc::unbounded_channel(); - let engine_handle = ConsensusEngineHandle::::new(engine_tx); - - let (_, sequence_rx) = broadcast::channel(1); - - let result = FlashBlockConsensusClient::new(engine_handle, sequence_rx); - assert!(result.is_ok()); - } - } - - mod submit_new_payload_behavior { - use super::*; - - #[test] - fn test_submit_new_payload_returns_parent_hash_when_no_state_root() { - // When conversion fails (no state_root), should return parent_hash - let factory = TestFlashBlockFactory::new(); - let fb0 = factory.flashblock_at(0).state_root(B256::ZERO).build(); - let parent_hash = fb0.base.as_ref().unwrap().parent_hash; - - let sequence = FlashBlockCompleteSequence::new(vec![fb0], None).unwrap(); - - // Verify conversion would fail - let conversion_result = OpExecutionData::try_from(&sequence); - assert!(conversion_result.is_err()); - - // In the actual run loop, submit_new_payload would return parent_hash - assert_eq!(sequence.payload_base().parent_hash, parent_hash); - } - - #[test] - fn test_submit_new_payload_returns_block_hash_when_state_root_available() { - // When conversion succeeds, should return the new block's hash - let factory = TestFlashBlockFactory::new(); - let fb0 = factory.flashblock_at(0).state_root(B256::ZERO).build(); - - let execution_outcome = - SequenceExecutionOutcome { block_hash: B256::random(), state_root: B256::random() }; - - let sequence = - FlashBlockCompleteSequence::new(vec![fb0], Some(execution_outcome)).unwrap(); - - // Verify conversion succeeds - let conversion_result = OpExecutionData::try_from(&sequence); - assert!(conversion_result.is_ok()); - - let mut data = conversion_result.unwrap(); - assert_eq!(data.payload.as_v1_mut().block_hash, execution_outcome.block_hash); - } - } - - mod forkchoice_update_behavior { - use super::*; - - #[test] - fn 
test_forkchoice_state_uses_parent_hash_for_safe_and_finalized() { - // Both safe_hash and finalized_hash should be set to parent_hash - let factory = TestFlashBlockFactory::new(); - let fb0 = factory.flashblock_at(0).build(); - let parent_hash = fb0.base.as_ref().unwrap().parent_hash; - - let sequence = FlashBlockCompleteSequence::new(vec![fb0], None).unwrap(); - - // Verify the expected forkchoice state - assert_eq!(sequence.payload_base().parent_hash, parent_hash); - } - - #[test] - fn test_forkchoice_update_with_new_block_hash() { - // When newPayload succeeds, FCU should use the new block's hash as head - let factory = TestFlashBlockFactory::new(); - let fb0 = factory.flashblock_at(0).state_root(B256::ZERO).build(); - - let execution_outcome = - SequenceExecutionOutcome { block_hash: B256::random(), state_root: B256::random() }; - - let sequence = - FlashBlockCompleteSequence::new(vec![fb0], Some(execution_outcome)).unwrap(); - - // The head_block_hash for FCU would be execution_outcome.block_hash - assert_eq!( - sequence.execution_outcome().unwrap().block_hash, - execution_outcome.block_hash - ); - } - - #[test] - fn test_forkchoice_update_with_parent_hash_when_no_state_root() { - // When newPayload is skipped (no state_root), FCU should use parent_hash as head - let factory = TestFlashBlockFactory::new(); - let fb0 = factory.flashblock_at(0).state_root(B256::ZERO).build(); - let parent_hash = fb0.base.as_ref().unwrap().parent_hash; - - let sequence = FlashBlockCompleteSequence::new(vec![fb0], None).unwrap(); - - // The head_block_hash for FCU would be parent_hash (fallback) - assert_eq!(sequence.payload_base().parent_hash, parent_hash); - } - } - - mod run_loop_logic { - use super::*; - - #[test] - fn test_run_loop_processes_sequence_with_state_root() { - // Scenario: Sequence with state_root should trigger both newPayload and FCU - let factory = TestFlashBlockFactory::new(); - let fb0 = factory.flashblock_at(0).state_root(B256::ZERO).build(); - - let 
execution_outcome = - SequenceExecutionOutcome { block_hash: B256::random(), state_root: B256::random() }; - - let sequence = - FlashBlockCompleteSequence::new(vec![fb0], Some(execution_outcome)).unwrap(); - - // Verify sequence is ready for newPayload - let conversion = OpExecutionData::try_from(&sequence); - assert!(conversion.is_ok()); - } - - #[test] - fn test_run_loop_processes_sequence_without_state_root() { - // Scenario: Sequence without state_root should skip newPayload but still do FCU - let factory = TestFlashBlockFactory::new(); - let fb0 = factory.flashblock_at(0).state_root(B256::ZERO).build(); - - let sequence = FlashBlockCompleteSequence::new(vec![fb0], None).unwrap(); - - // Verify sequence cannot be converted (newPayload will be skipped) - let conversion = OpExecutionData::try_from(&sequence); - assert!(conversion.is_err()); - - // But FCU should still happen with parent_hash - assert!(sequence.payload_base().parent_hash != B256::ZERO); - } - - #[test] - fn test_run_loop_handles_multiple_sequences() { - // Multiple sequences should be processed independently - let factory = TestFlashBlockFactory::new(); - - // Sequence 1: With state_root - let fb0_seq1 = factory.flashblock_at(0).state_root(B256::ZERO).build(); - let outcome1 = - SequenceExecutionOutcome { block_hash: B256::random(), state_root: B256::random() }; - let seq1 = - FlashBlockCompleteSequence::new(vec![fb0_seq1.clone()], Some(outcome1)).unwrap(); - - // Sequence 2: Without state_root (for next block) - let fb0_seq2 = factory.flashblock_for_next_block(&fb0_seq1).build(); - let seq2 = FlashBlockCompleteSequence::new(vec![fb0_seq2], None).unwrap(); - - // Both should be valid sequences - assert_eq!(seq1.block_number(), 100); - assert_eq!(seq2.block_number(), 101); - - // seq1 can be converted - assert!(OpExecutionData::try_from(&seq1).is_ok()); - // seq2 cannot be converted - assert!(OpExecutionData::try_from(&seq2).is_err()); - } - } -} diff --git a/crates/flashblocks/src/debug.rs 
b/crates/flashblocks/src/debug.rs new file mode 100644 index 00000000..ce03d0f5 --- /dev/null +++ b/crates/flashblocks/src/debug.rs @@ -0,0 +1,182 @@ +use crate::cache::FlashblockStateCache; +use reth_chain_state::ExecutedBlock; +use reth_primitives_traits::NodePrimitives; +use tracing::{debug, info, warn}; + +/// Captures the flashblocks and engine `ExecutedBlock`s synchronously (cheap Arc clones), +/// then spawns the heavy comparison on a blocking thread to avoid stalling the canonical +/// stream handler. +pub(crate) fn debug_compare_flashblocks_bundle_states( + flashblocks_state: &FlashblockStateCache, + block_number: u64, + block_hash: alloy_primitives::B256, + mut fb_trie_updates: Option, +) { + // Capture data synchronously (before handle_canonical_block evicts the cache). + // These are cheap — ExecutedBlock internals are Arc'd. + let fb_block = flashblocks_state.debug_get_executed_block_by_number(block_number); + let engine_block = flashblocks_state + .canon_in_memory_state + .state_by_hash(block_hash) + .map(|state| state.block()); + + if fb_trie_updates.is_none() { + // Flashblocks state cache might be lagging behind canonical chainstate. Check pending sequence. + // Only use target flashblock's accumulated trie_updates — non-target sequences have incomplete + // accumulation and would produce false-positive mismatches. + fb_trie_updates = flashblocks_state + .get_pending_sequence() + .filter(|seq| seq.get_height() == block_number && seq.is_target_flashblock()) + .map(|seq| seq.prefix_execution_meta.accumulated_trie_updates.clone().into_sorted()); + } + + // Spawn the heavy comparison on a blocking thread so the canonical stream handler + // stays responsive. trie_data() is synchronous (parking_lot::Mutex, no async). + tokio::task::spawn_blocking(move || { + compare_executed_blocks(fb_block, engine_block, block_number, fb_trie_updates); + }); +} + +/// Performs the deep comparison between flashblocks and engine `ExecutedBlock`s. 
+fn compare_executed_blocks( + fb_block: Option>, + engine_block: Option>, + block_number: u64, + fb_trie_updates: Option, +) { + let (Some(fb), Some(eng)) = (fb_block, engine_block) else { + debug!( + target: "flashblocks::verify", + block_number, + "Skipping BundleState comparison (block not available in both caches)" + ); + return; + }; + + let fb_hash = fb.recovered_block.hash(); + let eng_hash = eng.recovered_block.hash(); + + let fb_bundle = &fb.execution_output.state; + let eng_bundle = &eng.execution_output.state; + + // Deep compare accounts: match by address, compare BundleAccount fields + let mut account_mismatches = Vec::new(); + let mut fb_only = Vec::new(); + let mut eng_only = Vec::new(); + for (addr, fb_acct) in &fb_bundle.state { + if let Some(eng_acct) = eng_bundle.state.get(addr) { + if fb_acct != eng_acct { + account_mismatches.push(*addr); + } + } else { + fb_only.push(*addr); + } + } + for addr in eng_bundle.state.keys() { + if !fb_bundle.state.contains_key(addr) { + eng_only.push(*addr); + } + } + + // Deep compare reverts: both should have exactly 1 entry after flattening. + // Match by address within each revert vec. + let mut revert_mismatches = Vec::new(); + let mut revert_fb_only = Vec::new(); + let mut revert_eng_only = Vec::new(); + if fb_bundle.reverts.len() == eng_bundle.reverts.len() { + for (fb_rev, eng_rev) in fb_bundle.reverts.iter().zip(eng_bundle.reverts.iter()) { + let fb_map: std::collections::HashMap<_, _> = + fb_rev.iter().map(|(a, r)| (a, r)).collect(); + let eng_map: std::collections::HashMap<_, _> = + eng_rev.iter().map(|(a, r)| (a, r)).collect(); + for (addr, fb_r) in &fb_map { + if let Some(eng_r) = eng_map.get(addr) { + if fb_r != eng_r { + revert_mismatches.push(**addr); + } + } else { + revert_fb_only.push(**addr); + } + } + for addr in eng_map.keys() { + if !fb_map.contains_key(addr) { + revert_eng_only.push(**addr); + } + } + } + } + + // Compare hashed_state (the state diff input to trie computation). 
+ // This confirms the incremental BundleState produces the same hashed diff + // as a fresh execution — critical since we send hashed_state to the engine pre-warm. + let fb_trie = fb.trie_data(); + let eng_trie = eng.trie_data(); + let hashed_state_match = *fb_trie.hashed_state == *eng_trie.hashed_state; + + let all_match = fb_hash == eng_hash + && account_mismatches.is_empty() + && fb_only.is_empty() + && eng_only.is_empty() + && fb_bundle.reverts.len() == eng_bundle.reverts.len() + && revert_mismatches.is_empty() + && revert_fb_only.is_empty() + && revert_eng_only.is_empty() + && hashed_state_match; + + if all_match { + info!( + target: "flashblocks::verify", + block_number, + %fb_hash, + accounts = fb_bundle.state.len(), + reverts = fb_bundle.reverts.len(), + "Execution output MATCH: flashblocks == engine" + ); + } else { + warn!( + target: "flashblocks::verify", + block_number, + fb_hash = %fb_hash, + eng_hash = %eng_hash, + hash_match = fb_hash == eng_hash, + fb_accounts = fb_bundle.state.len(), + eng_accounts = eng_bundle.state.len(), + account_mismatches = account_mismatches.len(), + fb_only_accounts = fb_only.len(), + eng_only_accounts = eng_only.len(), + fb_reverts = fb_bundle.reverts.len(), + eng_reverts = eng_bundle.reverts.len(), + revert_mismatches = revert_mismatches.len(), + revert_fb_only = revert_fb_only.len(), + revert_eng_only = revert_eng_only.len(), + hashed_state_match, + "Execution output MISMATCH: flashblocks != engine" + ); + for addr in account_mismatches.iter().take(3) { + warn!(target: "flashblocks::verify", %addr, "Account state mismatch"); + } + for addr in revert_mismatches.iter().take(3) { + warn!(target: "flashblocks::verify", %addr, "Revert mismatch"); + } + } + + // Compare accumulated trie_updates (merged across all flashblock indices) with + // the engine's fresh trie_updates. This validates that the incremental accumulation + // via TrieUpdates::extend() produces the same result as a fresh single-pass computation. 
+ let Some(fb_updates) = fb_trie_updates else { + return; + }; + if fb_updates == *eng_trie.trie_updates { + info!( + target: "flashblocks::verify", + block_number, + "Trie updates MATCH: flashblocks == engine" + ); + } else { + warn!( + target: "flashblocks::verify", + block_number, + "Trie updates MISMATCH: flashblocks != engine" + ); + } +} diff --git a/crates/flashblocks/src/execution/assemble.rs b/crates/flashblocks/src/execution/assemble.rs new file mode 100644 index 00000000..b426aa34 --- /dev/null +++ b/crates/flashblocks/src/execution/assemble.rs @@ -0,0 +1,118 @@ +use alloy_consensus::{ + constants::EMPTY_WITHDRAWALS, Block, BlockBody, Header, EMPTY_OMMER_ROOT_HASH, +}; +use alloy_eips::{eip7685::EMPTY_REQUESTS_HASH, merge::BEACON_NONCE}; +use alloy_primitives::{Bloom, B256}; +use op_alloy_rpc_types_engine::OpFlashblockPayloadBase; +use reth_errors::BlockExecutionError; +use reth_optimism_consensus::isthmus; +use reth_optimism_forks::OpHardforks; +use reth_provider::StateProvider; +use reth_revm::db::BundleState; + +/// Input for [`assemble_flashblock`] bundling pre-computed roots and execution data. +pub(crate) struct FlashblockAssemblerInput<'a, T> { + /// The flashblock base payload attributes. + pub base: &'a OpFlashblockPayloadBase, + /// Pre-computed state root. + pub state_root: B256, + /// Pre-computed transaction root. + pub transactions_root: B256, + /// Pre-computed receipts root. + pub receipts_root: B256, + /// Pre-computed logs bloom. + pub logs_bloom: Bloom, + /// Total gas used by the block. + pub gas_used: u64, + /// Total blob gas used by the block. + pub blob_gas_used: u64, + /// Bundle state from execution (for isthmus `withdrawals_root` computation). + pub bundle_state: &'a BundleState, + /// State provider for isthmus `withdrawals_root` computation. + pub state_provider: &'a dyn StateProvider, + /// Signed transactions for the block body. 
+ pub transactions: Vec, +} + +/// Assembles a flashblock ([`Block`]) from pre-computed roots and execution output. +/// +/// Mirrors `OpBlockAssembler::assemble_block()` for hardfork-dependent header fields +/// (`withdrawals_root`, `requests_hash`, `blob_gas_used`, `excess_blob_gas`) but uses +/// pre-computed `transactions_root`, `receipts_root`, `logs_bloom`, and `state_root` +/// directly instead of recomputing them. +pub(crate) fn assemble_flashblock( + chain_spec: &ChainSpec, + input: FlashblockAssemblerInput<'_, T>, +) -> Result, BlockExecutionError> +where + ChainSpec: OpHardforks, +{ + let FlashblockAssemblerInput { + base, + state_root, + transactions_root, + receipts_root, + logs_bloom, + gas_used, + blob_gas_used, + bundle_state, + state_provider, + transactions, + } = input; + + let timestamp = base.timestamp; + let mut requests_hash = None; + + let withdrawals_root = if chain_spec.is_isthmus_active_at_timestamp(timestamp) { + requests_hash = Some(EMPTY_REQUESTS_HASH); + Some( + isthmus::withdrawals_root(bundle_state, state_provider) + .map_err(BlockExecutionError::other)?, + ) + } else if chain_spec.is_canyon_active_at_timestamp(timestamp) { + Some(EMPTY_WITHDRAWALS) + } else { + None + }; + + let (excess_blob_gas, blob_gas_used) = if chain_spec.is_jovian_active_at_timestamp(timestamp) { + (Some(0), Some(blob_gas_used)) + } else if chain_spec.is_ecotone_active_at_timestamp(timestamp) { + (Some(0), Some(0)) + } else { + (None, None) + }; + + let header = Header { + parent_hash: base.parent_hash, + ommers_hash: EMPTY_OMMER_ROOT_HASH, + beneficiary: base.fee_recipient, + state_root, + transactions_root, + receipts_root, + withdrawals_root, + logs_bloom, + timestamp, + mix_hash: base.prev_randao, + nonce: BEACON_NONCE.into(), + base_fee_per_gas: Some(base.base_fee_per_gas.saturating_to()), + number: base.block_number, + gas_limit: base.gas_limit, + difficulty: Default::default(), + gas_used, + extra_data: base.extra_data.clone(), + 
parent_beacon_block_root: Some(base.parent_beacon_block_root), + blob_gas_used, + excess_blob_gas, + requests_hash, + }; + + Ok(Block::new( + header, + BlockBody { + transactions, + ommers: Default::default(), + withdrawals: chain_spec.is_canyon_active_at_timestamp(timestamp).then(Default::default), + }, + )) +} diff --git a/crates/flashblocks/src/execution/mod.rs b/crates/flashblocks/src/execution/mod.rs new file mode 100644 index 00000000..24b5bd6a --- /dev/null +++ b/crates/flashblocks/src/execution/mod.rs @@ -0,0 +1,104 @@ +pub(crate) mod assemble; +pub(crate) mod validator; + +pub(crate) use validator::FlashblockSequenceValidator; + +use alloy_eips::eip4895::Withdrawal; +use alloy_rpc_types_engine::PayloadId; +use op_alloy_rpc_types_engine::OpFlashblockPayloadBase; + +use reth_optimism_primitives::OpReceipt; +use reth_provider::{ + BlockNumReader, ChangeSetReader, DatabaseProviderFactory, PruneCheckpointReader, + StageCheckpointReader, StorageChangeSetReader, StorageSettingsCache, +}; +use reth_revm::cached::CachedReads; +use reth_trie::updates::TrieUpdates; + +pub(crate) struct BuildArgs { + pub(crate) base: OpFlashblockPayloadBase, + pub(crate) payload_id: PayloadId, + pub(crate) transactions: I, + pub(crate) withdrawals: Vec, + pub(crate) last_flashblock_index: u64, + pub(crate) target_index: u64, +} + +/// Cached prefix execution data used to resume canonical builds. +#[derive(Debug, Clone, Default)] +pub struct PrefixExecutionMeta { + /// The payload ID of the latest flashblocks sequence. + pub(crate) payload_id: PayloadId, + /// Cached reads from execution for reuse. + pub cached_reads: CachedReads, + /// Number of leading transactions covered by cached execution. + pub(crate) cached_tx_count: usize, + /// Total gas used by the cached prefix. + pub(crate) gas_used: u64, + /// Total blob/DA gas used by the cached prefix. + pub(crate) blob_gas_used: u64, + /// The last flashblock index of the latest flashblocks sequence. 
+ pub(crate) last_flashblock_index: u64, + /// Accumulated trie updates across sequence incremental executions. + pub(crate) accumulated_trie_updates: TrieUpdates, +} + +/// Strategy describing how to compute the state root. +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +enum StateRootStrategy { + /// Use the state root task (background sparse trie computation). + StateRootTask, + /// Run the parallel state root computation on the calling thread. + Parallel, + /// Fall back to synchronous computation via the state provider. + Synchronous, +} + +/// Receipt requirements for cache-resume flow. +pub trait FlashblockReceipt: Clone { + /// Adds `gas_offset` to each receipt's `cumulative_gas_used`. + fn add_cumulative_gas_offset(&mut self, gas_offset: u64); +} + +impl FlashblockReceipt for OpReceipt { + fn add_cumulative_gas_offset(&mut self, gas_offset: u64) { + if gas_offset == 0 { + return; + } + let inner = self.as_receipt_mut(); + inner.cumulative_gas_used = inner.cumulative_gas_used.saturating_add(gas_offset); + } +} + +/// Trait alias for the bounds required on a provider factory to create an +/// [`OverlayStateProviderFactory`] that supports parallel and serial state +/// root computation. 
+pub trait OverlayProviderFactory: + DatabaseProviderFactory< + Provider: StageCheckpointReader + + PruneCheckpointReader + + BlockNumReader + + ChangeSetReader + + StorageChangeSetReader + + StorageSettingsCache, + > + Clone + + Send + + Sync + + 'static +{ +} + +impl OverlayProviderFactory for T where + T: DatabaseProviderFactory< + Provider: StageCheckpointReader + + PruneCheckpointReader + + BlockNumReader + + ChangeSetReader + + StorageChangeSetReader + + StorageSettingsCache, + > + Clone + + Send + + Sync + + 'static +{ +} diff --git a/crates/flashblocks/src/execution/validator.rs b/crates/flashblocks/src/execution/validator.rs new file mode 100644 index 00000000..77371ae4 --- /dev/null +++ b/crates/flashblocks/src/execution/validator.rs @@ -0,0 +1,1145 @@ +use crate::{ + cache::{CachedTxInfo, FlashblockStateCache, PendingSequence}, + execution::{ + assemble::{assemble_flashblock, FlashblockAssemblerInput}, + BuildArgs, FlashblockReceipt, OverlayProviderFactory, PrefixExecutionMeta, + StateRootStrategy, + }, +}; +use std::{ + collections::HashMap, + convert::Infallible, + panic::{self, AssertUnwindSafe}, + sync::{mpsc::RecvTimeoutError, Arc}, + time::{Duration, Instant}, +}; +use tracing::*; + +use alloy_consensus::{proofs::calculate_transaction_root, BlockHeader}; +use alloy_eip7928::BlockAccessList; +use alloy_eips::eip2718::{Encodable2718, WithEncoded}; +use alloy_evm::block::ExecutableTxParts; +use alloy_primitives::{Address, B256}; +use op_alloy_rpc_types_engine::OpFlashblockPayloadBase; + +use reth_chain_state::{DeferredTrieData, ExecutedBlock, LazyOverlay}; +use reth_engine_primitives::TreeConfig; +use reth_engine_tree::tree::{ + payload_processor::{ + receipt_root_task::{IndexedReceipt, ReceiptRootTaskHandle}, + ExecutionEnv, PayloadProcessor, + }, + sparse_trie::StateRootComputeOutcome, + CachedStateProvider, PayloadHandle, StateProviderBuilder, +}; +use reth_errors::BlockExecutionError; +use reth_errors::RethError; +use reth_evm::{ + 
execute::{BlockExecutor, ExecutableTxFor}, + ConfigureEvm, Evm, EvmEnvFor, TxEnvFor, +}; +use reth_execution_types::{BlockExecutionOutput, BlockExecutionResult}; +use reth_optimism_forks::OpHardforks; +use reth_primitives_traits::{ + transaction::TxHashRef, HeaderTy, NodePrimitives, Recovered, RecoveredBlock, SealedHeaderFor, +}; +use reth_provider::{ + providers::OverlayStateProviderFactory, BlockReader, DatabaseProviderROFactory, + HashedPostStateProvider, HeaderProvider, ProviderError, StateProvider, StateProviderFactory, + StateReader, +}; +use reth_revm::{ + cached::CachedReads, + database::StateProviderDatabase, + db::{ + states::{ + bundle_state::BundleRetention, + reverts::{AccountRevert, Reverts}, + }, + State, + }, +}; +use reth_rpc_eth_types::PendingBlock; +use reth_tasks::Runtime; +use reth_trie::{updates::TrieUpdates, HashedPostState, StateRoot}; +use reth_trie_parallel::root::{ParallelStateRoot, ParallelStateRootError}; + +/// Builds [`PendingSequence`]s from the accumulated flashblock transaction sequences. +/// Commits results directly to [`FlashblockStateCache`] via `handle_pending_sequence()`. +/// +/// The execution uses the Reth's [`PayloadProcessor`] for optimal execution and state +/// root calculation of flashlbocks sequence. All 3 state root computation strategies +/// are supported (synchronous, parrallel and state root task using sparse trie). +/// +/// - **Fresh (canonical parent)**: `StateProviderBuilder` with no overlay blocks. +/// - **Fresh (non-canonical parent)**: `StateProviderBuilder` with overlay blocks from +/// the flashblocks confirm/pending cache via `get_overlay_data()`. +/// - **Incremental (same height)**: Full re-execution via `execute_fresh()`. The warm +/// execution cache and `PreservedSparseTrie` from the previous sequence build offset +/// the cost of re-executing prefix transactions. 
+pub(crate) struct FlashblockSequenceValidator +where + EvmConfig: ConfigureEvm, + ChainSpec: OpHardforks, +{ + /// The flashblocks state cache containing the flashblocks state cache layer. + flashblocks_state: FlashblockStateCache, + /// Provider for database state access. + provider: Provider, + /// EVM configuration. + evm_config: EvmConfig, + /// Chain specification for hardfork checks. + chain_spec: Arc, + /// Configuration for the engine tree. + tree_config: TreeConfig, + /// Payload processor for state root computation. + payload_processor: PayloadProcessor, + /// Task runtime for spawning parallel work. + runtime: Runtime, +} + +impl + FlashblockSequenceValidator +where + N: NodePrimitives, + N::Receipt: FlashblockReceipt, + EvmConfig: ConfigureEvm + Unpin> + + 'static, + Provider: StateProviderFactory + + HeaderProvider

> + + OverlayProviderFactory + + BlockReader + + StateReader + + HashedPostStateProvider + + Unpin + + Clone, + ChainSpec: OpHardforks, +{ + pub(crate) fn new( + evm_config: EvmConfig, + provider: Provider, + chain_spec: Arc, + flashblocks_state: FlashblockStateCache, + runtime: Runtime, + tree_config: TreeConfig, + ) -> Self { + let payload_processor = PayloadProcessor::new( + runtime.clone(), + evm_config.clone(), + &tree_config, + Default::default(), + ); + Self { + flashblocks_state, + provider, + evm_config, + chain_spec, + tree_config, + payload_processor, + runtime, + } + } + + /// Executes the incoming flashblocks sequence transactions delta and commits the + /// result to the flashblocks state cache. + pub(crate) fn execute_sequence>>>( + &mut self, + args: BuildArgs, + ) -> eyre::Result<()> + where + N::SignedTx: Encodable2718, + N::Block: From>, + { + // Pre-validate incoming flashblocks sequence + let pending_sequence = self.prevalidate_incoming_sequence(&args)?; + + let parent_hash = args.base.parent_hash; + let block_transactions: Vec<_> = args.transactions.into_iter().collect(); + let block_transaction_count = block_transactions.len(); + let transactions: Vec<_> = if let Some(ref seq) = pending_sequence { + block_transactions + .iter() + .skip(seq.prefix_execution_meta.cached_tx_count) + .cloned() + .collect() + } else { + block_transactions.clone() + }; + + // Get state provider builder. + // 1. Fresh builds - get the provider builder from parent hash. + // 2. Incremental builds - get provider builder from pending sequence hash. 
+ let hash = pending_sequence.as_ref().map_or(parent_hash, |seq| seq.get_hash()); + let (provider_builder, header, overlay_data) = self.state_provider_builder(hash)?; + let mut state_provider = provider_builder.build()?; + + // For incremental builds, use the previous index's computed state root so the incremental + // prefix trie nodes (PreservedSparseTrie) are re-used, to ensure SR calculation is only + // done on suffix changes and optimized. + let parent_state_root = header.state_root(); + let parent_header = + pending_sequence.as_ref().map_or(header, |seq| seq.parent_header.clone()); + + let attrs = args.base.clone().into(); + let evm_env = + self.evm_config.next_evm_env(&parent_header, &attrs).map_err(RethError::other)?; + + let execution_env = ExecutionEnv { + evm_env: evm_env.clone(), + hash: B256::ZERO, + parent_hash, + parent_state_root, + transaction_count: transactions.len(), + withdrawals: Some(args.withdrawals), + }; + + // Plan the strategy used for state root computation. + let strategy = self.plan_state_root_computation(); + + debug!( + target: "flashblocks::validator", + execute_height = args.base.block_number, + ?strategy, + "Decided which state root algorithm to run" + ); + + // TODO: Extract the BAL once flashblocks BAL is supported + let bal = None; + + // Create lazy overlay from ancestors - this doesn't block, allowing execution to start + // before the trie data is ready. The overlay will be computed on first access. + let (lazy_overlay, anchor_hash) = + Self::get_parent_lazy_overlay(overlay_data.as_ref(), hash); + + // Create overlay factory for payload processor (StateRootTask path needs it for + // multiproofs) + let overlay_factory = OverlayStateProviderFactory::new( + self.provider.clone(), + self.flashblocks_state.get_changeset_cache(), + ) + .with_block_hash(Some(anchor_hash)) + .with_lazy_overlay(lazy_overlay); + + // Spawn the appropriate processor based on strategy. 
+ let mut handle = self.spawn_payload_processor( + execution_env, + transactions.clone(), + provider_builder, + overlay_factory.clone(), + strategy, + bal, + )?; + + // Use cached state provider before executing, used in execution after prewarming threads + // complete + if let Some((caches, cache_metrics)) = handle.caches().zip(handle.cache_metrics()) { + state_provider = + Box::new(CachedStateProvider::new(state_provider, caches, cache_metrics)); + }; + + // Execute the block and handle any execution errors. + // The receipt root task is spawned before execution and receives receipts incrementally + // as transactions complete, allowing parallel computation during execution. + let (output, senders, receipt_root_rx, cached_reads) = self.execute_block( + state_provider.as_ref(), + evm_env, + &parent_header, + attrs, + transactions, + pending_sequence.as_ref(), + &mut handle, + )?; + + // After executing the block we can stop prewarming transactions + handle.stop_prewarming_execution(); + + // Create ExecutionOutcome early so we can terminate caching before validation and state + // root computation. Using Arc allows sharing with both the caching task and the deferred + // trie task without cloning the expensive BundleState. + let output = Arc::new(output); + + // Terminate caching task early since execution is complete and caching is no longer + // needed. This frees up resources while state root computation continues. + let valid_block_tx = handle.terminate_caching(Some(output.clone())); + + // Extract signed transactions for the block body before moving + // `block_transactions` into the tx root closure. 
+ let body_transactions: Vec = + block_transactions.iter().map(|tx| tx.1.inner().clone()).collect(); + + // Spawn async tx root computation + let (result_tx, result_rx) = tokio::sync::oneshot::channel(); + self.payload_processor.executor().spawn_blocking(move || { + let txs: Vec<_> = block_transactions.iter().map(|tx| &tx.1).collect(); + let _ = result_tx.send(calculate_transaction_root(&txs)); + }); + + // Wait for the receipt root computation to complete. + let (receipts_root, logs_bloom) = { + debug!(target: "flashblocks::validator", "wait_receipt_root"); + receipt_root_rx + .blocking_recv() + .inspect_err(|_| { + tracing::error!( + target: "flashblocks::validator", + execute_height = args.base.block_number, + "Receipt root task dropped sender without result, receipt root calculation likely aborted" + ); + })? + }; + let transactions_root = result_rx.blocking_recv().inspect_err(|_| { + tracing::error!( + target: "flashblocks::validator", + "Transaction root task dropped sender without result, transaction root calculation likely aborted" + ); + })?; + + let root_time = Instant::now(); + let hashed_state = self.provider.hashed_post_state(&output.state); + let mut maybe_state_root = None; + match strategy { + StateRootStrategy::StateRootTask => { + debug!(target: "flashblocks::validator", execute_height = args.base.block_number, "Using sparse trie state root algorithm"); + + let task_result = self.await_state_root_with_timeout( + &mut handle, + overlay_factory.clone(), + &hashed_state, + args.base.block_number, + )?; + + match task_result { + Ok(StateRootComputeOutcome { state_root, trie_updates }) => { + let elapsed = root_time.elapsed(); + maybe_state_root = Some((state_root, trie_updates)); + info!(target: "flashblocks::validator", execute_height = args.base.block_number, ?state_root, ?elapsed, "State root task finished"); + } + Err(error) => { + debug!(target: "flashblocks::validator", %error, "State root task failed"); + } + } + } + StateRootStrategy::Parallel 
=> { + debug!(target: "flashblocks::validator", execute_height = args.base.block_number, "Using parallel state root algorithm"); + match self.compute_state_root_parallel(overlay_factory.clone(), &hashed_state) { + Ok(result) => { + let elapsed = root_time.elapsed(); + info!( + target: "flashblocks::validator", + execute_height = args.base.block_number, + regular_state_root = ?result.0, + ?elapsed, + "Regular root task finished" + ); + maybe_state_root = Some((result.0, result.1)); + } + Err(error) => { + debug!(target: "flashblocks::validator", execute_height = args.base.block_number, err = %error, "Parallel state root computation failed"); + } + } + } + StateRootStrategy::Synchronous => {} + } + + // Determine the state root. + // If the state root was computed in parallel, we use it. + // Otherwise, we fall back to computing it synchronously. + let (state_root, trie_output) = if let Some(maybe_state_root) = maybe_state_root { + maybe_state_root + } else { + // fallback is to compute the state root regularly in sync + warn!(target: "flashblocks::validator", execute_height = args.base.block_number, "Failed to compute state root"); + let (root, updates) = + Self::compute_state_root_serial(overlay_factory.clone(), &hashed_state)?; + (root, updates) + }; + + // Capture execution metrics before `output` is moved into the deferred trie task. + let prefix_gas_used = output.result.gas_used; + let prefix_blob_gas_used = output.result.blob_gas_used; + + // Accumulate trie_updates across sequence incremental executions. + let mut accumulated_trie_updates = pending_sequence + .as_ref() + .map(|seq| seq.prefix_execution_meta.accumulated_trie_updates.clone()) + .unwrap_or_default(); + accumulated_trie_updates.extend(trie_output.clone()); + + // Assemble the block using pre-computed roots (avoids recomputation). 
+ let block = assemble_flashblock( + self.chain_spec.as_ref(), + FlashblockAssemblerInput { + base: &args.base, + state_root, + transactions_root, + receipts_root, + logs_bloom, + gas_used: prefix_gas_used, + blob_gas_used: prefix_blob_gas_used, + bundle_state: &output.state, + state_provider: state_provider.as_ref(), + transactions: body_transactions, + }, + )?; + let block: N::Block = block.into(); + let block = RecoveredBlock::new_unhashed(block, senders); + + if let Some(valid_block_tx) = valid_block_tx { + let _ = valid_block_tx.send(()); + } + let executed_block = self.spawn_deferred_trie_task( + block, + output, + hashed_state, + // Only pass prefix trie updates to the deferred trie task + trie_output, + overlay_data, + overlay_factory, + ); + + // Update `PayloadProcessor`'s execution cache for next block's prewarming + self.payload_processor.on_inserted_executed_block( + executed_block.recovered_block.block_with_parent(), + &executed_block.execution_output.state, + ); + + self.commit_pending_sequence( + args.base, + executed_block, + parent_header, + PrefixExecutionMeta { + payload_id: args.payload_id, + cached_reads, + cached_tx_count: block_transaction_count, + gas_used: prefix_gas_used, + blob_gas_used: prefix_blob_gas_used, + last_flashblock_index: args.last_flashblock_index, + accumulated_trie_updates, + }, + block_transaction_count, + args.target_index, + )?; + + Ok(()) + } + + /// Builds a [`PendingSequence`] from an [`ExecutionOutcome`] and commits it to the + /// flashblocks state cache. 
+ fn commit_pending_sequence( + &self, + base: OpFlashblockPayloadBase, + executed_block: ExecutedBlock, + parent_header: SealedHeaderFor, + prefix_execution_meta: PrefixExecutionMeta, + transaction_count: usize, + target_index: u64, + ) -> eyre::Result<()> { + // Build tx index + let block_hash = executed_block.recovered_block.hash(); + let mut tx_index = HashMap::with_capacity(transaction_count); + for (idx, tx) in executed_block.recovered_block.transactions_recovered().enumerate() { + tx_index.insert( + *tx.tx_hash(), + CachedTxInfo { + block_number: base.block_number, + block_hash, + tx_index: idx as u64, + tx: tx.into_inner().clone(), + receipt: executed_block.execution_output.result.receipts[idx].clone(), + }, + ); + } + + self.flashblocks_state.handle_pending_sequence(PendingSequence { + // Set pending block deadline to 1 second matching default blocktime. + pending: PendingBlock::with_executed_block( + Instant::now() + Duration::from_secs(1), + executed_block, + ), + tx_index, + block_hash, + parent_header, + prefix_execution_meta, + target_index, + }) + } + + fn prevalidate_incoming_sequence< + I: IntoIterator>>, + >( + &self, + args: &BuildArgs, + ) -> eyre::Result>> { + let incoming_payload_id = args.payload_id; + let incoming_block_number = args.base.block_number; + let incoming_last_index = args.last_flashblock_index; + if let Some(pending) = self.flashblocks_state.get_pending_sequence() { + // Validate incoming height continuity + let pending_height = pending.get_height(); + if pending_height != incoming_block_number + && pending_height + 1 != incoming_block_number + { + return Err(eyre::eyre!( + "height mismatch: incoming={incoming_block_number}, pending={pending_height}" + )); + } + if pending_height == incoming_block_number { + // Validate for incremental builds + let pending_payload_id = pending.prefix_execution_meta.payload_id; + if pending_payload_id != incoming_payload_id { + return Err(eyre::eyre!( + "payload_id mismatch on incremental build: 
incoming={incoming_payload_id}, pending={pending_payload_id}" + )); + } + let pending_last_index = pending.prefix_execution_meta.last_flashblock_index; + if pending_last_index >= incoming_last_index { + return Err(eyre::eyre!( + "skipping, flashblock index already validated: incoming={incoming_last_index}, pending={pending_last_index}" + )); + } + return Ok(Some(pending)); + } + // Optimistic fresh build + return Ok(None); + } + // No pending sequence. Validate with flashblocks state cache highest confirm height + let confirm_height = self.flashblocks_state.get_confirm_height(); + if confirm_height == 0 { + return Err(eyre::eyre!( + "confirm height not yet initialized, skipping: incoming={incoming_block_number}" + )); + } + if incoming_block_number > confirm_height + 1 { + return Err(eyre::eyre!( + "flashblock height too far ahead: incoming={incoming_block_number}, confirm={confirm_height}" + )); + } + if incoming_block_number <= confirm_height { + return Err(eyre::eyre!( + "stale height: incoming={incoming_block_number}, confirm={confirm_height}" + )); + } + Ok(None) + } + + /// Executes a block with the given state provider. + /// + /// This method orchestrates block execution: + /// 1. Sets up the EVM with state database and precompile caching + /// 2. Spawns a background task for incremental receipt root computation + /// 3. Executes transactions with metrics collection via state hooks + /// 4. Merges state transitions and records execution metrics + #[expect(clippy::type_complexity, clippy::too_many_arguments)] + fn execute_block( + &mut self, + state_provider: &dyn StateProvider, + evm_env: EvmEnvFor, + parent_header: &SealedHeaderFor, + attrs: EvmConfig::NextBlockEnvCtx, + transactions: Vec>>, + pending_sequence: Option<&PendingSequence>, + handle: &mut PayloadHandle, + ) -> eyre::Result<( + BlockExecutionOutput, + Vec
, + tokio::sync::oneshot::Receiver<(B256, alloy_primitives::Bloom)>, + CachedReads, + )> + where + T: ExecutableTxFor + ExecutableTxParts, N::SignedTx>, + Err: core::error::Error + Send + Sync + 'static, + N::SignedTx: TxHashRef, + EvmConfig: ConfigureEvm + Unpin> + + 'static, + { + // Build state + let mut read_cache = pending_sequence + .map(|p| p.prefix_execution_meta.cached_reads.clone()) + .unwrap_or_default(); + let cached_db = read_cache.as_db_mut(StateProviderDatabase::new(state_provider)); + let mut state_builder = State::builder().with_database(cached_db).with_bundle_update(); + if let Some(seq) = pending_sequence { + state_builder = state_builder + .with_bundle_prestate(seq.pending.executed_block.execution_output.state.clone()); + } + let mut db = state_builder.build(); + + // For incremental builds, the only pre-execution effect we need is set_state_clear_flag, + // which configures EVM empty-account handling (OP Stack chains activate Spurious Dragon + // at genesis, so this is always true). + if pending_sequence.is_some() { + db.set_state_clear_flag(true); + } + + let evm = self.evm_config.evm_with_env(&mut db, evm_env); + let execution_ctx = self + .evm_config + .context_for_next_block(parent_header, attrs) + .map_err(RethError::other)?; + let executor = self.evm_config.create_executor(evm, execution_ctx.clone()); + // Release the lifetime tie to &mut db so subsequent mutable borrows of db are allowed. + drop(execution_ctx); + + // Spawn background task to compute receipt root and logs bloom incrementally. + // Unbounded channel is used since tx count bounds capacity anyway (max ~30k txs per block). 
+ let prefix_receipt_count = pending_sequence.map_or(0, |s| s.pending.receipts.len()); + let receipts_len = prefix_receipt_count + transactions.len(); + let (receipt_tx, receipt_rx) = crossbeam_channel::unbounded(); + let (result_tx, result_rx) = tokio::sync::oneshot::channel(); + let task_handle = ReceiptRootTaskHandle::new(receipt_rx, result_tx); + self.payload_processor.executor().spawn_blocking(move || task_handle.run(receipts_len)); + + let transaction_count = transactions.len(); + let mut executor = executor.with_state_hook(Some(Box::new(handle.state_hook()))); + + // Apply pre-execution changes for fresh builds + if pending_sequence.is_none() { + executor.apply_pre_execution_changes()?; + } + + // Execute all transactions and finalize + let execute_height = parent_header.number() + 1; + let (executor, suffix_senders, suffix_receipts) = self.execute_transactions( + executor, + pending_sequence, + transaction_count, + handle, + &receipt_tx, + execute_height, + )?; + drop(receipt_tx); + + // Finish execution and replace with the generated suffix receipts + let (_evm, mut result) = executor.finish().map(|(evm, result)| (evm.into_db(), result))?; + result.receipts = suffix_receipts; + if let Some(seq) = pending_sequence { + result = Self::merge_suffix_results( + &seq.prefix_execution_meta, + (*seq.pending.receipts).clone(), + result, + ); + } + // Reconstruct full senders list + let senders = if let Some(seq) = pending_sequence { + let mut all_senders = seq.pending.executed_block.recovered_block.senders().to_vec(); + all_senders.extend(suffix_senders); + all_senders + } else { + suffix_senders + }; + + // Merge transitions into bundle state + db.merge_transitions(BundleRetention::Reverts); + + // Explicitly drop db to release the mutable borrow on read_cache held via cached_db, + // allowing read_cache to be moved into the return value. 
+ let mut bundle = db.take_bundle(); + drop(db); + + // For incremental builds, the bundle accumulates one revert entry per flashblock + // index (from with_bundle_prestate + merge_transitions at each index). The engine + // persistence service expects a single revert entry per block. Flatten all revert + // transitions into one: + // - Keep the earliest (parent-state) account info revert per address + // - Merge storage reverts across transitions (earliest per slot via or_insert) + if pending_sequence.is_some() && bundle.reverts.len() > 1 { + let mut reverts_map = HashMap::::new(); + for reverts in bundle.reverts.iter() { + for (addr, new_revert) in reverts { + if let Some(revert_entry) = reverts_map.get_mut(addr) { + // Merge new storage slots from later transitions and keep the + // earliest value per slot (parent-state revert entry). + for (slot, slot_revert) in &new_revert.storage { + revert_entry.storage.entry(*slot).or_insert(*slot_revert); + } + // Propagate wipe_storage if any transition triggers it, such as + // SELFDESTRUCT in a later flashblock index. + revert_entry.wipe_storage |= new_revert.wipe_storage; + } else { + reverts_map.insert(*addr, new_revert.clone()); + } + } + } + bundle.reverts = Reverts::new(vec![reverts_map.into_iter().collect()]); + } + + let output = BlockExecutionOutput { result, state: bundle }; + debug!(target: "flashblocks::validator", execute_height, "Executed block"); + + Ok((output, senders, result_rx, read_cache)) + } + + #[expect(clippy::type_complexity)] + fn execute_transactions( + &self, + mut executor: Executor, + pending_sequence: Option<&PendingSequence>, + transaction_count: usize, + handle: &mut PayloadHandle, + receipt_tx: &crossbeam_channel::Sender>, + execute_height: u64, + ) -> eyre::Result<(Executor, Vec
, Vec), BlockExecutionError> + where + T: ExecutableTxFor + + ExecutableTxParts< + <::Evm as Evm>::Tx, + ::Transaction, + >, + Executor: BlockExecutor, + Err: core::error::Error + Send + Sync + 'static, + N::SignedTx: TxHashRef, + EvmConfig: ConfigureEvm + Unpin> + + 'static, + { + // Send all previously executed receipts to the receipt root task for incremental builds. + let receipt_index_offset = if let Some(seq) = pending_sequence { + let prefix_count = seq.pending.receipts.len(); + for (index, receipt) in seq.pending.receipts.iter().enumerate() { + let _ = receipt_tx.send(IndexedReceipt::new(index, receipt.clone())); + } + prefix_count + } else { + 0 + }; + + let mut senders = Vec::with_capacity(transaction_count); + let mut receipts = Vec::new(); + let mut transactions = handle.iter_transactions(); + + // Some executors may execute transactions that do not append receipts during the + // main loop (e.g., system transactions whose receipts are added during finalization). + // In that case, invoking the callback on every transaction would resend the previous + // receipt with the same index and can panic the ordered root builder. + let mut last_sent_len = 0usize; + let prefix_gas_used = pending_sequence.map_or(0, |seq| seq.prefix_execution_meta.gas_used); + loop { + let Some(tx_result) = transactions.next() else { break }; + + let tx = tx_result.map_err(BlockExecutionError::other)?; + let tx_signer = *tx.signer(); + senders.push(tx_signer); + + trace!(target: "flashblocks::validator", execute_height, txhash = %tx.tx().tx_hash(), "Executing transaction"); + executor.execute_transaction(tx)?; + + let current_len = executor.receipts().len(); + if current_len > last_sent_len { + last_sent_len = current_len; + // Send the latest receipt to the background task for incremental root computation. 
+ if let Some(mut receipt) = executor.receipts().last().cloned() { + let tx_index = receipt_index_offset + current_len - 1; + receipt.add_cumulative_gas_offset(prefix_gas_used); + receipts.push(receipt.clone()); + let _ = receipt_tx.send(IndexedReceipt::new(tx_index, receipt)); + } + } + } + Ok((executor, senders, receipts)) + } + + /// Determines the state root computation strategy based on configuration. + /// + /// Note: Use state root task only if prefix sets are empty, otherwise proof generation is + /// too expensive because it requires walking all paths in every proof. + const fn plan_state_root_computation(&self) -> StateRootStrategy { + if self.tree_config.state_root_fallback() { + StateRootStrategy::Synchronous + } else if self.tree_config.use_state_root_task() { + StateRootStrategy::StateRootTask + } else { + StateRootStrategy::Parallel + } + } + + fn spawn_payload_processor( + &mut self, + env: ExecutionEnv, + txs: Vec>>, + provider_builder: StateProviderBuilder, + overlay_factory: OverlayStateProviderFactory, + strategy: StateRootStrategy, + bal: Option>, + ) -> eyre::Result< + PayloadHandle< + impl ExecutableTxFor + use, + impl core::error::Error + Send + Sync + 'static + use, + N::Receipt, + >, + > { + let tx_iter = Self::flashblock_tx_iterator(txs); + match strategy { + StateRootStrategy::StateRootTask => { + // Use the pre-computed overlay factory for multiproofs + Ok(self.payload_processor.spawn( + env, + tx_iter, + provider_builder, + overlay_factory, + &self.tree_config, + bal, + )) + } + StateRootStrategy::Parallel | StateRootStrategy::Synchronous => Ok(self + .payload_processor + .spawn_cache_exclusive(env, tx_iter, provider_builder, bal)), + } + } + + /// Awaits the state root from the background task, with an optional timeout fallback. + /// + /// If a timeout is configured (`state_root_task_timeout`), this method first waits for the + /// state root task up to the timeout duration. 
If the task doesn't complete in time, a + /// sequential state root computation is spawned via `spawn_blocking`. Both computations + /// then race: the main thread polls the task receiver and the sequential result channel + /// in a loop, returning whichever finishes first. + /// + /// If no timeout is configured, this simply awaits the state root task without any fallback. + /// + /// Returns `ProviderResult>` where the outer `ProviderResult` captures + /// unrecoverable errors from the sequential fallback (e.g. DB errors), while the inner + /// `Result` captures parallel state root task errors that can still fall back to serial. + fn await_state_root_with_timeout( + &self, + handle: &mut PayloadHandle, + overlay_factory: OverlayStateProviderFactory, + hashed_state: &HashedPostState, + execute_height: u64, + ) -> eyre::Result> { + let Some(timeout) = self.tree_config.state_root_task_timeout() else { + return Ok(handle.state_root()); + }; + + let task_rx = handle.take_state_root_rx(); + + match task_rx.recv_timeout(timeout) { + Ok(result) => Ok(result), + Err(RecvTimeoutError::Disconnected) => { + Ok(Err(ParallelStateRootError::Other("sparse trie task dropped".to_string()))) + } + Err(RecvTimeoutError::Timeout) => { + warn!( + target: "flashblocks::validator", + execute_height, + ?timeout, + "State root task timed out, spawning sequential fallback" + ); + + let (seq_tx, seq_rx) = + std::sync::mpsc::channel::>(); + + let seq_overlay = overlay_factory; + let seq_hashed_state = hashed_state.clone(); + self.payload_processor.executor().spawn_blocking(move || { + let result = Self::compute_state_root_serial(seq_overlay, &seq_hashed_state); + let _ = seq_tx.send(result); + }); + + const POLL_INTERVAL: std::time::Duration = std::time::Duration::from_millis(10); + + loop { + match task_rx.recv_timeout(POLL_INTERVAL) { + Ok(result) => { + debug!( + target: "flashblocks::validator", + source = "task", + execute_height, + "State root timeout race won" + ); + return 
Ok(result); + } + Err(RecvTimeoutError::Disconnected) => { + debug!( + target: "flashblocks::validator", + execute_height, + "State root task dropped, waiting for sequential fallback" + ); + let result = seq_rx.recv().map_err(|_| { + eyre::eyre!(std::io::Error::other( + "both state root computations failed", + )) + })?; + let (state_root, trie_updates) = result?; + return Ok(Ok(StateRootComputeOutcome { state_root, trie_updates })); + } + Err(RecvTimeoutError::Timeout) => {} + } + + if let Ok(result) = seq_rx.try_recv() { + debug!( + target: "flashblocks::validator", + source = "sequential", + execute_height, + "State root timeout race won" + ); + let (state_root, trie_updates) = result?; + return Ok(Ok(StateRootComputeOutcome { state_root, trie_updates })); + } + } + } + } + } + + /// Compute state root for the given hashed post state in parallel. + /// + /// Uses an overlay factory which provides the state of the parent block, along with the + /// [`HashedPostState`] containing the changes of this block, to compute the state root and + /// trie updates for this block. + /// + /// # Returns + /// + /// Returns `Ok(_)` if computed successfully. + /// Returns `Err(_)` if error was encountered during computation. + fn compute_state_root_parallel( + &self, + overlay_factory: OverlayStateProviderFactory, + hashed_state: &HashedPostState, + ) -> eyre::Result<(B256, TrieUpdates), ParallelStateRootError> { + // The `hashed_state` argument will be taken into account as part of the overlay, but we + // need to use the prefix sets which were generated from it to indicate to the + // ParallelStateRoot which parts of the trie need to be recomputed. 
+ let prefix_sets = hashed_state.construct_prefix_sets().freeze(); + let overlay_factory = + overlay_factory.with_extended_hashed_state_overlay(hashed_state.clone_into_sorted()); + ParallelStateRoot::new(overlay_factory, prefix_sets, self.runtime.clone()) + .incremental_root_with_updates() + } + + /// Compute state root for the given hashed post state in serial. + /// + /// Uses an overlay factory which provides the state of the parent block, along with the + /// [`HashedPostState`] containing the changes of this block, to compute the state root and + /// trie updates for this block. + fn compute_state_root_serial( + overlay_factory: OverlayStateProviderFactory, + hashed_state: &HashedPostState, + ) -> eyre::Result<(B256, TrieUpdates)> { + // The `hashed_state` argument will be taken into account as part of the overlay, but we + // need to use the prefix sets which were generated from it to indicate to the + // StateRoot which parts of the trie need to be recomputed. + let prefix_sets = hashed_state.construct_prefix_sets().freeze(); + let overlay_factory = + overlay_factory.with_extended_hashed_state_overlay(hashed_state.clone_into_sorted()); + + let provider = overlay_factory.database_provider_ro()?; + + Ok(StateRoot::new(&provider, &provider) + .with_prefix_sets(prefix_sets) + .root_with_updates()?) + } + + fn merge_suffix_results( + cached_prefix: &PrefixExecutionMeta, + cached_receipts: Vec, + suffix_result: BlockExecutionResult, + ) -> BlockExecutionResult { + let mut receipts = cached_receipts; + receipts.extend(suffix_result.receipts); + + // Use only suffix requests: the suffix executor's finish() produces + // post-execution requests from the complete block state (cached prestate + + // suffix changes). The cached prefix requests came from an intermediate + // state and must not be merged. 
+ let requests = suffix_result.requests; + BlockExecutionResult { + receipts, + requests, + gas_used: cached_prefix.gas_used.saturating_add(suffix_result.gas_used), + blob_gas_used: cached_prefix.blob_gas_used.saturating_add(suffix_result.blob_gas_used), + } + } + + #[expect(clippy::type_complexity)] + fn state_provider_builder( + &self, + hash: B256, + ) -> eyre::Result<( + StateProviderBuilder, + SealedHeaderFor, + Option<(Vec>, B256)>, + )> { + // Get overlay data (executed blocks + parent header) from flashblocks + // state cache and the canonical in-memory cache. + if let Some((overlay_blocks, header, anchor_hash)) = + self.flashblocks_state.get_overlay_data(&hash)? + { + debug!( + target: "flashblocks::validator", + %hash, + "found state for block in flashblocks cache, creating provider builder"); + return Ok(( + StateProviderBuilder::new( + self.provider.clone(), + anchor_hash, + Some(overlay_blocks.clone()), + ), + header, + Some((overlay_blocks, anchor_hash)), + )); + } + // Check if block is persisted + if let Some(header) = self.provider.sealed_header_by_hash(hash)? { + debug!( + target: "flashblocks::validator", + %hash, + "found state for block in database, creating provider builder"); + return Ok(( + StateProviderBuilder::new(self.provider.clone(), hash, None), + header, + None, + )); + } + Err(eyre::eyre!("no state found for block {hash}")) + } + + /// Creates a [`LazyOverlay`] for the parent block without blocking. + /// + /// Returns a lazy overlay that will compute the trie input on first access, and the anchor + /// block hash (the highest persisted ancestor). This allows execution to start immediately + /// while the trie input computation is deferred until the overlay is actually needed. + /// + /// If parent is on disk (no in-memory blocks), returns `(None, tip_hash)`. 
+ fn get_parent_lazy_overlay( + overlay_data: Option<&(Vec>, B256)>, + tip_hash: B256, + ) -> (Option, B256) { + let Some((blocks, anchor)) = overlay_data else { + return (None, tip_hash); + }; + let anchor_hash = *anchor; + + if blocks.is_empty() { + debug!(target: "flashblocks::validator", "Parent found on disk, no lazy overlay needed"); + return (None, anchor_hash); + } + + // Extract deferred trie data handles (non-blocking) + debug!( + target: "flashblocks::validator", + %anchor_hash, + num_blocks = blocks.len(), + "Creating lazy overlay for flashblock state cache in-memory blocks" + ); + let handles: Vec = blocks.iter().map(|b| b.trie_data_handle()).collect(); + (Some(LazyOverlay::new(anchor_hash, handles)), anchor_hash) + } + + /// Spawns a background task to compute and sort trie data for the executed block. + /// + /// This function creates a [`DeferredTrieData`] handle with fallback inputs and spawns a + /// blocking task that calls `wait_cloned()` to: + /// 1. Sort the block's hashed state and trie updates + /// 2. Merge ancestor overlays and extend with the sorted data + /// 3. Create an [`AnchoredTrieInput`](reth_chain_state::AnchoredTrieInput) for efficient future + /// trie computations + /// 4. Cache the result so subsequent calls return immediately + /// + /// If the background task hasn't completed when `trie_data()` is called, `wait_cloned()` + /// computes from the stored inputs, eliminating deadlock risk and duplicate computation. + /// + /// The validation hot path can return immediately after state root verification, + /// while consumers (DB writes, overlay providers, proofs) get trie data either + /// from the completed task or via fallback computation. 
+ fn spawn_deferred_trie_task( + &self, + block: RecoveredBlock, + execution_outcome: Arc>, + hashed_state: HashedPostState, + trie_output: TrieUpdates, + overlay_data: Option<(Vec>, B256)>, + overlay_factory: OverlayStateProviderFactory, + ) -> ExecutedBlock { + // Capture parent hash and ancestor overlays for deferred trie input construction. + let (overlay_blocks, anchor_hash) = + overlay_data.unwrap_or_else(|| (Vec::new(), block.parent_hash())); + + // Collect lightweight ancestor trie data handles. We don't call trie_data() here; + // the merge and any fallback sorting happens in the compute_trie_input_task. + let ancestors: Vec = + overlay_blocks.iter().rev().map(|b| b.trie_data_handle()).collect(); + + // Create deferred handle with fallback inputs in case the background task hasn't completed. + let deferred_trie_data = DeferredTrieData::pending( + Arc::new(hashed_state), + Arc::new(trie_output), + anchor_hash, + ancestors, + ); + let deferred_handle_task = deferred_trie_data.clone(); + + // Capture block info and cache handle for changeset computation + let block_hash = block.hash(); + let block_number = block.number(); + let changeset_cache = self.flashblocks_state.get_changeset_cache(); + + // Spawn background task to compute trie data. Calling `wait_cloned` will compute from + // the stored inputs and cache the result, so subsequent calls return immediately. 
+ let compute_trie_input_task = move || { + debug!( + target: "flashblocks::changeset", + ?block_number, + "compute_trie_input_task", + ); + + let result = panic::catch_unwind(AssertUnwindSafe(|| { + let computed = deferred_handle_task.wait_cloned(); + // Compute and cache changesets using the computed trie_updates + // Get a provider from the overlay factory for trie cursor access + let changeset_start = Instant::now(); + let changeset_result = + overlay_factory.database_provider_ro().and_then(|provider| { + reth_trie::changesets::compute_trie_changesets( + &provider, + &computed.trie_updates, + ) + .map_err(ProviderError::Database) + }); + + match changeset_result { + Ok(changesets) => { + debug!( + target: "flashblocks::changeset", + ?block_number, + elapsed = ?changeset_start.elapsed(), + "Computed and caching changesets" + ); + changeset_cache.insert(block_hash, block_number, Arc::new(changesets)); + } + Err(e) => { + warn!( + target: "flashblocks::changeset", + ?block_number, + ?e, + "Failed to compute changesets in deferred trie task" + ); + } + } + })); + + if result.is_err() { + error!( + target: "flashblocks::validator", + "Deferred trie task panicked; fallback computation will be used when trie data is accessed" + ); + } + }; + + // Spawn task that computes trie data asynchronously. 
+ self.payload_processor.executor().spawn_blocking(compute_trie_input_task); + + ExecutedBlock::with_deferred_trie_data( + Arc::new(block), + execution_outcome, + deferred_trie_data, + ) + } + + #[allow(clippy::type_complexity)] + fn flashblock_tx_iterator( + transactions: Vec>>, + ) -> ( + Vec>>, + fn(WithEncoded>) -> Result, Infallible>, + ) { + (transactions, |tx| Ok(tx.1)) + } +} diff --git a/crates/flashblocks/src/handler.rs b/crates/flashblocks/src/handler.rs deleted file mode 100644 index 6ee42978..00000000 --- a/crates/flashblocks/src/handler.rs +++ /dev/null @@ -1,168 +0,0 @@ -use std::{net::SocketAddr, sync::Arc, time::Duration}; -use tracing::{debug, info, trace, warn}; - -use reth_node_api::FullNodeComponents; -use reth_node_core::dirs::{ChainPath, DataDirPath}; -use reth_optimism_flashblocks::{FlashBlock, FlashBlockRx}; - -use xlayer_builder::{ - args::FlashblocksArgs, - flashblocks::{FlashblockPayloadsCache, WebSocketPublisher}, - metrics::{tokio::FlashblocksTaskMetrics, BuilderMetrics}, -}; - -pub struct FlashblocksService -where - Node: FullNodeComponents, -{ - node: Node, - flashblock_rx: FlashBlockRx, - ws_pub: Arc, - relay_flashblocks: bool, - datadir: ChainPath, -} - -impl FlashblocksService -where - Node: FullNodeComponents, -{ - pub fn new( - node: Node, - flashblock_rx: FlashBlockRx, - args: FlashblocksArgs, - relay_flashblocks: bool, - datadir: ChainPath, - ) -> Result { - let ws_addr = SocketAddr::new(args.flashblocks_addr.parse()?, args.flashblocks_port); - - let metrics = Arc::new(BuilderMetrics::default()); - let task_metrics = Arc::new(FlashblocksTaskMetrics::new()); - let ws_pub = Arc::new( - WebSocketPublisher::new( - ws_addr, - metrics, - &task_metrics.websocket_publisher, - args.ws_subscriber_limit, - ) - .map_err(|e| eyre::eyre!("Failed to create WebSocket publisher: {e}"))?, - ); - - info!(target: "flashblocks", "WebSocket publisher initialized at {}", ws_addr); - - Ok(Self { node, flashblock_rx, ws_pub, relay_flashblocks, datadir 
}) - } - - pub fn spawn(mut self) { - debug!(target: "flashblocks", "Initializing flashblocks service"); - - let task_executor = self.node.task_executor().clone(); - if self.relay_flashblocks { - let datadir = self.datadir.clone(); - let flashblock_rx = self.flashblock_rx.resubscribe(); - task_executor.spawn_critical_task( - "xlayer-flashblocks-persistence", - Box::pin(async move { - handle_persistence(flashblock_rx, datadir).await; - }), - ); - - task_executor.spawn_critical_task( - "xlayer-flashblocks-publish", - Box::pin(async move { - self.publish().await; - }), - ); - } - } - - async fn publish(&mut self) { - info!( - target: "flashblocks", - "Flashblocks websocket publisher started" - ); - - loop { - match self.flashblock_rx.recv().await { - Ok(flashblock) => { - trace!( - target: "flashblocks", - "Received flashblock: index={}, block_hash={}", - flashblock.index, - flashblock.diff.block_hash - ); - self.publish_flashblock(&flashblock).await; - } - Err(e) => { - warn!(target: "flashblocks", "Flashblock receiver error: {:?}", e); - break; - } - } - } - - info!(target: "flashblocks", "Flashblocks service stopped"); - } - - /// Relays the incoming flashblock to the flashblock websocket subscribers. - async fn publish_flashblock(&self, flashblock: &Arc) { - match self.ws_pub.publish(flashblock) { - Ok(_) => { - trace!( - target: "flashblocks", - "Published flashblock: index={}, block_hash={}", - flashblock.index, - flashblock.diff.block_hash - ); - } - Err(e) => { - warn!( - target: "flashblocks", - "Failed to publish flashblock: {:?}", e - ); - } - } - } -} - -/// Handles the persistence of the pending flashblocks sequence to disk. -async fn handle_persistence(mut rx: FlashBlockRx, datadir: ChainPath) { - let cache = FlashblockPayloadsCache::new(Some(datadir)); - - // Set default flush interval to 5 seconds - let mut flush_interval = tokio::time::interval(Duration::from_secs(5)); - let mut dirty = false; - - loop { - tokio::select! 
{ - result = rx.recv() => { - match result { - Ok(flashblock) => { - if let Err(e) = cache.add_flashblock_payload(flashblock.as_ref().clone()) { - warn!(target: "flashblocks", "Failed to cache flashblock payload: {e}"); - continue; - } - dirty = true; - } - Err(e) => { - warn!(target: "flashblocks", "Persistence handle receiver error: {e:?}"); - break; - } - } - } - _ = flush_interval.tick() => { - if dirty { - if let Err(e) = cache.persist().await { - warn!(target: "flashblocks", "Failed to persist pending sequence: {e}"); - } - dirty = false; - } - } - } - } - - // Flush again on shutdown - if dirty && let Err(e) = cache.persist().await { - warn!(target: "flashblocks", "Failed final persist of pending sequence: {e}"); - } - - info!(target: "flashblocks", "Flashblocks persistence handle stopped"); -} diff --git a/crates/flashblocks/src/lib.rs b/crates/flashblocks/src/lib.rs index 271b1973..2ab26c61 100644 --- a/crates/flashblocks/src/lib.rs +++ b/crates/flashblocks/src/lib.rs @@ -1,90 +1,25 @@ //! X-Layer flashblocks crate. 
-pub mod handler; -pub mod pubsub; -pub mod subscription; - -use reth_primitives_traits::NodePrimitives; -use std::sync::Arc; - -// Included to enable serde feature for `OpReceipt` type used transitively -use reth_optimism_primitives as _; - -// Used by downstream crates -use alloy_rpc_types_eth as _; - -mod consensus; -pub use consensus::FlashBlockConsensusClient; - -mod payload; -pub use payload::{FlashBlock, PendingFlashBlock}; - -mod sequence; -pub use sequence::{ - FlashBlockCompleteSequence, FlashBlockPendingSequence, SequenceExecutionOutcome, -}; - -mod service; -pub use service::{ - create_canonical_block_channel, CanonicalBlockNotification, FlashBlockBuildInfo, - FlashBlockService, -}; - -mod worker; -pub use worker::FlashblockCachedReceipt; - mod cache; +mod debug; +mod execution; +mod persist; +mod state; +mod subscription; +mod ws; -mod pending_state; -pub use pending_state::{PendingBlockState, PendingStateRegistry}; - -pub mod validation; - -mod tx_cache; -pub use tx_cache::TransactionCache; +pub mod service; #[cfg(test)] mod test_utils; -mod ws; -pub use ws::{FlashBlockDecoder, WsConnect, WsFlashBlockStream}; - -/// Receiver of the most recent [`PendingFlashBlock`] built out of [`FlashBlock`]s. -pub type PendingBlockRx = tokio::sync::watch::Receiver>>; +pub use cache::{CachedTxInfo, FlashblockStateCache, PendingSequence}; +pub use service::{FlashblocksPersistCtx, FlashblocksRpcCtx, FlashblocksRpcService}; +pub use subscription::FlashblocksPubSub; +pub use ws::WsFlashBlockStream; -/// Receiver of the sequences of [`FlashBlock`]s built. -pub type FlashBlockCompleteSequenceRx = - tokio::sync::broadcast::Receiver; - -/// Receiver of received [`FlashBlock`]s from the (websocket) subscription. -pub type FlashBlockRx = tokio::sync::broadcast::Receiver>; - -/// Receiver that signals whether a [`FlashBlock`] is currently being built. -pub type InProgressFlashBlockRx = tokio::sync::watch::Receiver>; - -/// Container for all flashblocks-related listeners. 
-/// -/// Groups together the channels for flashblock-related updates. -#[derive(Debug)] -pub struct FlashblocksListeners { - /// Receiver of the most recent executed [`PendingFlashBlock`] built out of [`FlashBlock`]s. - pub pending_block_rx: PendingBlockRx, - /// Subscription channel of the complete sequences of [`FlashBlock`]s built. - pub flashblocks_sequence: tokio::sync::broadcast::Sender, - /// Receiver that signals whether a [`FlashBlock`] is currently being built. - pub in_progress_rx: InProgressFlashBlockRx, - /// Subscription channel for received flashblocks from the (websocket) connection. - pub received_flashblocks: tokio::sync::broadcast::Sender>, -} +use std::sync::Arc; +use xlayer_builder::broadcast::XLayerFlashblockPayload; -impl FlashblocksListeners { - /// Creates a new [`FlashblocksListeners`] with the given channels. - pub const fn new( - pending_block_rx: PendingBlockRx, - flashblocks_sequence: tokio::sync::broadcast::Sender, - in_progress_rx: InProgressFlashBlockRx, - received_flashblocks: tokio::sync::broadcast::Sender>, - ) -> Self { - Self { pending_block_rx, flashblocks_sequence, in_progress_rx, received_flashblocks } - } -} +pub type PendingSequenceRx = tokio::sync::watch::Receiver>>; +pub type ReceivedFlashblocksRx = tokio::sync::broadcast::Receiver>; diff --git a/crates/flashblocks/src/pending_state.rs b/crates/flashblocks/src/pending_state.rs deleted file mode 100644 index 6c367658..00000000 --- a/crates/flashblocks/src/pending_state.rs +++ /dev/null @@ -1,388 +0,0 @@ -//! Pending block state for speculative flashblock building. -//! -//! This module provides types for tracking execution state from flashblock builds, -//! enabling speculative building of subsequent blocks before their parent canonical -//! block arrives via P2P. 
- -use alloy_primitives::B256; -use reth_execution_types::BlockExecutionOutput; -use reth_primitives_traits::{HeaderTy, NodePrimitives, SealedHeader}; -use reth_revm::cached::CachedReads; -use std::{ - collections::{HashMap, VecDeque}, - sync::Arc, -}; - -/// Tracks the execution state from building a pending block. -/// -/// This is used to enable speculative building of subsequent blocks: -/// - When flashblocks for block N+1 arrive before canonical block N -/// - The pending state from building block N's flashblocks can be used -/// - This allows continuous flashblock processing without waiting for P2P -#[derive(Debug, Clone)] -pub struct PendingBlockState { - /// Locally computed block hash for this built block. - /// - /// This hash is used to match subsequent flashblock sequences by `parent_hash` - /// during speculative chaining. - pub block_hash: B256, - /// Block number that was built. - pub block_number: u64, - /// Parent hash of the built block (may be non-canonical for speculative builds). - pub parent_hash: B256, - /// Canonical anchor hash for state lookups. - /// - /// This is the hash used for `history_by_block_hash` when loading state. - /// For canonical builds, this equals `parent_hash`. - /// For speculative builds, this is the canonical block hash that the chain - /// of speculative builds is rooted at (forwarded from parent's anchor). - pub canonical_anchor_hash: B256, - /// Execution outcome containing state changes. - pub execution_outcome: Arc>, - /// Cached reads from execution for reuse. - pub cached_reads: CachedReads, - /// Sealed header for this built block. - /// - /// Used as the parent header for speculative child builds. - pub sealed_header: Option>>, -} - -impl PendingBlockState { - /// Creates a new pending block state. 
- pub const fn new( - block_hash: B256, - block_number: u64, - parent_hash: B256, - canonical_anchor_hash: B256, - execution_outcome: Arc>, - cached_reads: CachedReads, - ) -> Self { - Self { - block_hash, - block_number, - parent_hash, - canonical_anchor_hash, - execution_outcome, - cached_reads, - sealed_header: None, - } - } - - /// Attaches a sealed header for use as parent context in speculative builds. - pub fn with_sealed_header(mut self, sealed_header: SealedHeader>) -> Self { - self.sealed_header = Some(sealed_header); - self - } -} - -/// Registry of pending block states for speculative building. -/// -/// Maintains a small cache of recently built pending blocks, allowing -/// subsequent flashblock sequences to build on top of them even before -/// the canonical blocks arrive. -#[derive(Debug)] -pub struct PendingStateRegistry { - /// Executed pending states keyed by locally computed block hash. - by_block_hash: HashMap>, - /// Insertion order for bounded eviction. - insertion_order: VecDeque, - /// Most recently recorded block hash. - latest_block_hash: Option, - /// Maximum number of tracked pending states. - max_entries: usize, -} - -impl PendingStateRegistry { - const DEFAULT_MAX_ENTRIES: usize = 64; - - /// Creates a new pending state registry. - pub fn new() -> Self { - Self::with_max_entries(Self::DEFAULT_MAX_ENTRIES) - } - - /// Creates a new pending state registry with an explicit entry bound. - pub fn with_max_entries(max_entries: usize) -> Self { - let max_entries = max_entries.max(1); - Self { - by_block_hash: HashMap::with_capacity(max_entries), - insertion_order: VecDeque::with_capacity(max_entries), - latest_block_hash: None, - max_entries, - } - } - - /// Records a completed build's state for potential use by subsequent builds. 
- pub fn record_build(&mut self, state: PendingBlockState) { - let block_hash = state.block_hash; - - if self.by_block_hash.contains_key(&block_hash) { - self.insertion_order.retain(|hash| *hash != block_hash); - } - - self.by_block_hash.insert(block_hash, state); - self.insertion_order.push_back(block_hash); - self.latest_block_hash = Some(block_hash); - - while self.by_block_hash.len() > self.max_entries { - let Some(evicted_hash) = self.insertion_order.pop_front() else { - break; - }; - self.by_block_hash.remove(&evicted_hash); - if self.latest_block_hash == Some(evicted_hash) { - self.latest_block_hash = self.insertion_order.back().copied(); - } - } - } - - /// Gets the pending state for a given parent hash, if available. - /// - /// Returns `Some` if we have pending state whose `block_hash` matches the requested - /// `parent_hash`. - pub fn get_state_for_parent(&self, parent_hash: B256) -> Option<&PendingBlockState> { - self.by_block_hash.get(&parent_hash) - } - - /// Clears all pending state. - pub fn clear(&mut self) { - self.by_block_hash.clear(); - self.insertion_order.clear(); - self.latest_block_hash = None; - } - - /// Returns the current pending state, if any. 
- pub fn current(&self) -> Option<&PendingBlockState> { - self.latest_block_hash.and_then(|hash| self.by_block_hash.get(&hash)) - } -} - -impl Default for PendingStateRegistry { - fn default() -> Self { - Self::new() - } -} - -#[cfg(test)] -mod tests { - use super::*; - use reth_optimism_primitives::OpPrimitives; - - type TestRegistry = PendingStateRegistry; - - #[test] - fn test_registry_returns_state_for_matching_parent() { - let mut registry = TestRegistry::new(); - - let block_hash = B256::repeat_byte(1); - let parent_hash = B256::repeat_byte(0); - let state = PendingBlockState { - block_hash, - block_number: 100, - parent_hash, - canonical_anchor_hash: parent_hash, - execution_outcome: Arc::new(BlockExecutionOutput::default()), - cached_reads: CachedReads::default(), - sealed_header: None, - }; - registry.record_build(state); - - // Should find state when querying with matching block_hash as parent - let result = registry.get_state_for_parent(block_hash); - assert!(result.is_some()); - assert_eq!(result.unwrap().block_number, 100); - } - - #[test] - fn test_registry_returns_none_for_wrong_parent() { - let mut registry = TestRegistry::new(); - - let parent_hash = B256::repeat_byte(0); - let state = PendingBlockState { - block_hash: B256::repeat_byte(1), - block_number: 100, - parent_hash, - canonical_anchor_hash: parent_hash, - execution_outcome: Arc::new(BlockExecutionOutput::default()), - cached_reads: CachedReads::default(), - sealed_header: None, - }; - registry.record_build(state); - - // Different parent hash should return None - assert!(registry.get_state_for_parent(B256::repeat_byte(2)).is_none()); - } - - #[test] - fn test_registry_clear() { - let mut registry = TestRegistry::new(); - - let parent_hash = B256::repeat_byte(0); - let state = PendingBlockState { - block_hash: B256::repeat_byte(1), - block_number: 100, - parent_hash, - canonical_anchor_hash: parent_hash, - execution_outcome: Arc::new(BlockExecutionOutput::default()), - cached_reads: 
CachedReads::default(), - sealed_header: None, - }; - registry.record_build(state); - assert!(registry.current().is_some()); - - registry.clear(); - assert!(registry.current().is_none()); - } - - #[test] - fn test_registry_tracks_multiple_states_by_hash() { - let mut registry = TestRegistry::new(); - - let anchor = B256::repeat_byte(0); - let state_100 = PendingBlockState { - block_hash: B256::repeat_byte(1), - block_number: 100, - parent_hash: anchor, - canonical_anchor_hash: anchor, - execution_outcome: Arc::new(BlockExecutionOutput::default()), - cached_reads: CachedReads::default(), - sealed_header: None, - }; - let state_101 = PendingBlockState { - block_hash: B256::repeat_byte(2), - block_number: 101, - parent_hash: state_100.block_hash, - canonical_anchor_hash: anchor, - execution_outcome: Arc::new(BlockExecutionOutput::default()), - cached_reads: CachedReads::default(), - sealed_header: None, - }; - - registry.record_build(state_100.clone()); - registry.record_build(state_101.clone()); - - assert_eq!(registry.current().map(|s| s.block_number), Some(101)); - assert_eq!( - registry.get_state_for_parent(state_100.block_hash).map(|s| s.block_number), - Some(100) - ); - assert_eq!( - registry.get_state_for_parent(state_101.block_hash).map(|s| s.block_number), - Some(101) - ); - } - - #[test] - fn test_registry_eviction_respects_max_entries() { - let mut registry = PendingStateRegistry::::with_max_entries(2); - let anchor = B256::repeat_byte(0); - - let state_100 = PendingBlockState { - block_hash: B256::repeat_byte(1), - block_number: 100, - parent_hash: anchor, - canonical_anchor_hash: anchor, - execution_outcome: Arc::new(BlockExecutionOutput::default()), - cached_reads: CachedReads::default(), - sealed_header: None, - }; - let state_101 = PendingBlockState { - block_hash: B256::repeat_byte(2), - block_number: 101, - parent_hash: state_100.block_hash, - canonical_anchor_hash: anchor, - execution_outcome: Arc::new(BlockExecutionOutput::default()), - 
cached_reads: CachedReads::default(), - sealed_header: None, - }; - let state_102 = PendingBlockState { - block_hash: B256::repeat_byte(3), - block_number: 102, - parent_hash: state_101.block_hash, - canonical_anchor_hash: anchor, - execution_outcome: Arc::new(BlockExecutionOutput::default()), - cached_reads: CachedReads::default(), - sealed_header: None, - }; - - registry.record_build(state_100); - registry.record_build(state_101.clone()); - registry.record_build(state_102.clone()); - - assert!(registry.get_state_for_parent(B256::repeat_byte(1)).is_none()); - assert_eq!( - registry.get_state_for_parent(state_101.block_hash).map(|s| s.block_number), - Some(101) - ); - assert_eq!( - registry.get_state_for_parent(state_102.block_hash).map(|s| s.block_number), - Some(102) - ); - assert_eq!(registry.current().map(|s| s.block_number), Some(102)); - } - - /// Tests that `canonical_anchor_hash` is distinct from `parent_hash` in speculative chains. - /// - /// When building speculatively: - /// - Block N (canonical): `parent_hash` = N-1, `canonical_anchor` = N-1 (same) - /// - Block N+1 (speculative): `parent_hash` = N, `canonical_anchor` = N-1 (forwarded) - /// - Block N+2 (speculative): `parent_hash` = N+1, `canonical_anchor` = N-1 (still forwarded) - /// - /// The `canonical_anchor_hash` always points to the last canonical block used for - /// `history_by_block_hash` lookups. 
- #[test] - fn test_canonical_anchor_forwarding_semantics() { - // Canonical block N-1 (the anchor for speculative chain) - let canonical_anchor = B256::repeat_byte(0x00); - - // Block N built on canonical - anchor equals parent - let block_n_hash = B256::repeat_byte(0x01); - let state_n = PendingBlockState:: { - block_hash: block_n_hash, - block_number: 100, - parent_hash: canonical_anchor, - canonical_anchor_hash: canonical_anchor, // Same as parent for canonical build - execution_outcome: Arc::new(BlockExecutionOutput::default()), - cached_reads: CachedReads::default(), - sealed_header: None, - }; - - // Verify block N's anchor is the canonical block - assert_eq!(state_n.canonical_anchor_hash, canonical_anchor); - assert_eq!(state_n.parent_hash, state_n.canonical_anchor_hash); - - // Block N+1 built speculatively on N - anchor is FORWARDED from N - let block_n1_hash = B256::repeat_byte(0x02); - let state_n1 = PendingBlockState:: { - block_hash: block_n1_hash, - block_number: 101, - parent_hash: block_n_hash, // Parent is block N - canonical_anchor_hash: state_n.canonical_anchor_hash, // Forwarded from N - execution_outcome: Arc::new(BlockExecutionOutput::default()), - cached_reads: CachedReads::default(), - sealed_header: None, - }; - - // Verify N+1's anchor is still the canonical block, NOT block N - assert_eq!(state_n1.canonical_anchor_hash, canonical_anchor); - assert_ne!(state_n1.parent_hash, state_n1.canonical_anchor_hash); - - // Block N+2 built speculatively on N+1 - anchor still forwarded - let block_n2_hash = B256::repeat_byte(0x03); - let state_n2 = PendingBlockState:: { - block_hash: block_n2_hash, - block_number: 102, - parent_hash: block_n1_hash, // Parent is block N+1 - canonical_anchor_hash: state_n1.canonical_anchor_hash, // Forwarded from N+1 - execution_outcome: Arc::new(BlockExecutionOutput::default()), - cached_reads: CachedReads::default(), - sealed_header: None, - }; - - // Verify N+2's anchor is STILL the original canonical block - 
assert_eq!(state_n2.canonical_anchor_hash, canonical_anchor); - assert_ne!(state_n2.parent_hash, state_n2.canonical_anchor_hash); - - // All three blocks should have the same canonical anchor - assert_eq!(state_n.canonical_anchor_hash, state_n1.canonical_anchor_hash); - assert_eq!(state_n1.canonical_anchor_hash, state_n2.canonical_anchor_hash); - } -} diff --git a/crates/flashblocks/src/persist.rs b/crates/flashblocks/src/persist.rs new file mode 100644 index 00000000..3c35c991 --- /dev/null +++ b/crates/flashblocks/src/persist.rs @@ -0,0 +1,97 @@ +use crate::ReceivedFlashblocksRx; +use std::{sync::Arc, time::Duration}; +use tracing::*; + +use reth_node_core::dirs::{ChainPath, DataDirPath}; + +use xlayer_builder::{broadcast::WebSocketPublisher, flashblocks::FlashblockPayloadsCache}; + +/// Handles the persistence of the pending flashblocks sequence to disk. +pub async fn handle_persistence(mut rx: ReceivedFlashblocksRx, datadir: ChainPath) { + let cache = FlashblockPayloadsCache::new(Some(datadir)); + + // Set default flush interval to 5 seconds + let mut flush_interval = tokio::time::interval(Duration::from_secs(5)); + let mut dirty = false; + + loop { + tokio::select! 
{ + result = rx.recv() => { + match result { + Ok(flashblock) => { + if let Err(e) = cache.add_flashblock_payload(flashblock.inner.clone()) { + warn!(target: "flashblocks", "Failed to cache flashblock payload: {e}"); + continue; + } + dirty = true; + } + Err(e) => { + warn!(target: "flashblocks", "Persistence handle receiver error: {e:?}"); + break; + } + } + } + _ = flush_interval.tick() => { + if dirty { + if let Err(e) = cache.persist().await { + warn!(target: "flashblocks", "Failed to persist pending sequence: {e}"); + } + dirty = false; + } + } + } + } + + // Flush again on shutdown + if dirty && let Err(e) = cache.persist().await { + warn!(target: "flashblocks", "Failed final persist of pending sequence: {e}"); + } + + info!(target: "flashblocks", "Flashblocks persistence handle stopped"); +} + +/// Handles the relaying of the flashblocks to the downstream flashblocks +/// websocket subscribers. +pub async fn handle_relay_flashblocks( + mut rx: ReceivedFlashblocksRx, + ws_pub: Arc, +) { + info!( + target: "flashblocks", + "Flashblocks websocket publisher started" + ); + + loop { + match rx.recv().await { + Ok(flashblock) => { + trace!( + target: "flashblocks", + "Received flashblock: index={}, block_hash={}", + flashblock.inner.index, + flashblock.inner.diff.block_hash + ); + match ws_pub.publish(&flashblock) { + Ok(_) => { + trace!( + target: "flashblocks", + "Published flashblock: index={}, block_hash={}", + flashblock.inner.index, + flashblock.inner.diff.block_hash + ); + } + Err(e) => { + warn!( + target: "flashblocks", + "Failed to publish flashblock: {:?}", e + ); + } + } + } + Err(e) => { + warn!(target: "flashblocks", "Flashblock receiver error: {:?}", e); + break; + } + } + } + info!(target: "flashblocks", "Flashblocks service stopped"); +} diff --git a/crates/flashblocks/src/sequence.rs b/crates/flashblocks/src/sequence.rs deleted file mode 100644 index dabbb94d..00000000 --- a/crates/flashblocks/src/sequence.rs +++ /dev/null @@ -1,740 +0,0 @@ -use 
crate::{FlashBlock, FlashBlockCompleteSequenceRx}; -use alloy_primitives::{Bytes, B256}; -use alloy_rpc_types_engine::PayloadId; -use core::mem; -use eyre::{bail, OptionExt}; -use op_alloy_rpc_types_engine::OpFlashblockPayloadBase; -use reth_revm::cached::CachedReads; -use std::{collections::BTreeMap, ops::Deref}; -use tokio::sync::broadcast; -use tracing::*; - -/// The size of the broadcast channel for completed flashblock sequences. -const FLASHBLOCK_SEQUENCE_CHANNEL_SIZE: usize = 128; - -#[derive(Debug, Clone, Copy, PartialEq, Eq)] -enum FollowupRejectionReason { - BlockNumber, - PayloadId, - BlockAndPayload, -} - -impl FollowupRejectionReason { - const fn as_str(self) -> &'static str { - match self { - Self::BlockNumber => "block_number_mismatch", - Self::PayloadId => "payload_id_mismatch", - Self::BlockAndPayload => "block_and_payload_mismatch", - } - } -} - -/// Outcome from executing a flashblock sequence. -#[derive(Debug, Clone, Copy, PartialEq, Eq)] -#[allow(unnameable_types)] -pub struct SequenceExecutionOutcome { - /// The block hash of the executed pending block - pub block_hash: B256, - /// Properly computed state root - pub state_root: B256, -} - -/// An ordered B-tree keeping the track of a sequence of [`FlashBlock`]s by their indices. -#[derive(Debug)] -pub struct FlashBlockPendingSequence { - /// tracks the individual flashblocks in order - inner: BTreeMap, - /// Broadcasts flashblocks to subscribers. - block_broadcaster: broadcast::Sender, - /// Optional execution outcome from building the current sequence. - execution_outcome: Option, - /// Cached state reads for the current block. - /// Current `PendingFlashBlock` is built out of a sequence of `FlashBlocks`, and executed again - /// when fb received on top of the same block. Avoid redundant I/O across multiple - /// executions within the same block. - cached_reads: Option, -} - -impl FlashBlockPendingSequence { - /// Create a new pending sequence. 
- pub fn new() -> Self { - // Note: if the channel is full, send will not block but rather overwrite the oldest - // messages. Order is preserved. - let (tx, _) = broadcast::channel(FLASHBLOCK_SEQUENCE_CHANNEL_SIZE); - Self { - inner: BTreeMap::new(), - block_broadcaster: tx, - execution_outcome: None, - cached_reads: None, - } - } - - /// Returns the sender half of the [`FlashBlockCompleteSequence`] channel. - pub const fn block_sequence_broadcaster( - &self, - ) -> &broadcast::Sender { - &self.block_broadcaster - } - - /// Gets a subscriber to the flashblock sequences produced. - pub fn subscribe_block_sequence(&self) -> FlashBlockCompleteSequenceRx { - self.block_broadcaster.subscribe() - } - - /// Returns whether this flashblock would be accepted into the current sequence. - pub fn can_accept(&self, flashblock: &FlashBlock) -> bool { - if flashblock.index == 0 { - return true; - } - - self.followup_rejection_reason(flashblock).is_none() - } - - fn followup_rejection_reason( - &self, - flashblock: &FlashBlock, - ) -> Option { - // only insert if we previously received the same block and payload, assume we received - // index 0 - let same_block = self.block_number() == Some(flashblock.block_number()); - let same_payload = self.payload_id() == Some(flashblock.payload_id); - if same_block && same_payload { - None - } else if !same_block && !same_payload { - Some(FollowupRejectionReason::BlockAndPayload) - } else if !same_block { - Some(FollowupRejectionReason::BlockNumber) - } else { - Some(FollowupRejectionReason::PayloadId) - } - } - - /// Inserts a new block into the sequence. - /// - /// A [`FlashBlock`] with index 0 resets the set. 
- pub fn insert(&mut self, flashblock: FlashBlock) { - if flashblock.index == 0 { - trace!(target: "flashblocks", number=%flashblock.block_number(), "Tracking new flashblock sequence"); - self.inner.insert(flashblock.index, flashblock); - return; - } - - if self.can_accept(&flashblock) { - trace!(target: "flashblocks", number=%flashblock.block_number(), index = %flashblock.index, block_count = self.inner.len() ,"Received followup flashblock"); - self.inner.insert(flashblock.index, flashblock); - } else { - let rejection_reason = self - .followup_rejection_reason(&flashblock) - .expect("non-accepted followup must have rejection reason"); - trace!( - target: "flashblocks", - number = %flashblock.block_number(), - index = %flashblock.index, - current_block_number = ?self.block_number(), - expected_payload_id = ?self.payload_id(), - incoming_payload_id = ?flashblock.payload_id, - rejection_reason = rejection_reason.as_str(), - "Ignoring untracked flashblock following" - ); - } - } - - /// Set execution outcome from building the flashblock sequence - pub const fn set_execution_outcome( - &mut self, - execution_outcome: Option, - ) { - self.execution_outcome = execution_outcome; - } - - /// Set cached reads for this sequence - pub fn set_cached_reads(&mut self, cached_reads: CachedReads) { - self.cached_reads = Some(cached_reads); - } - - /// Removes the cached reads for this sequence - pub const fn take_cached_reads(&mut self) -> Option { - self.cached_reads.take() - } - - /// Returns the first block number - pub fn block_number(&self) -> Option { - Some(self.inner.values().next()?.block_number()) - } - - /// Returns the payload base of the first tracked flashblock. - pub fn payload_base(&self) -> Option { - self.inner.values().next()?.base.clone() - } - - /// Returns the number of tracked flashblocks. - pub fn count(&self) -> usize { - self.inner.len() - } - - /// Returns the reference to the last flashblock. 
- pub fn last_flashblock(&self) -> Option<&FlashBlock> { - self.inner.last_key_value().map(|(_, b)| b) - } - - /// Returns the current/latest flashblock index in the sequence - pub fn index(&self) -> Option { - Some(self.inner.values().last()?.index) - } - /// Returns the payload id of the first tracked flashblock in the current sequence. - pub fn payload_id(&self) -> Option { - Some(self.inner.values().next()?.payload_id) - } - - /// Finalizes the current pending sequence and returns it as a complete sequence. - /// - /// Clears the internal state and returns an error if the sequence is empty or validation fails. - pub fn finalize(&mut self) -> eyre::Result { - if self.inner.is_empty() { - bail!("Cannot finalize empty flashblock sequence"); - } - - let flashblocks = mem::take(&mut self.inner); - let execution_outcome = mem::take(&mut self.execution_outcome); - self.cached_reads = None; - - FlashBlockCompleteSequence::new(flashblocks.into_values().collect(), execution_outcome) - } - - /// Returns an iterator over all flashblocks in the sequence. - pub fn flashblocks(&self) -> impl Iterator { - self.inner.values() - } -} - -impl Default for FlashBlockPendingSequence { - fn default() -> Self { - Self::new() - } -} - -/// A complete sequence of flashblocks, often corresponding to a full block. -/// -/// Ensures invariants of a complete flashblocks sequence. -/// If this entire sequence of flashblocks was executed on top of latest block, this also includes -/// the execution outcome with block hash and state root. -#[derive(Debug, Clone)] -pub struct FlashBlockCompleteSequence { - inner: Vec, - /// Optional execution outcome from building the flashblock sequence - execution_outcome: Option, -} - -impl FlashBlockCompleteSequence { - /// Create a complete sequence from a vector of flashblocks. 
- /// Ensure that: - /// * vector is not empty - /// * first flashblock have the base payload - /// * sequence of flashblocks is sound (successive index from 0, same payload id, ...) - pub fn new( - blocks: Vec, - execution_outcome: Option, - ) -> eyre::Result { - let first_block = blocks.first().ok_or_eyre("No flashblocks in sequence")?; - - // Ensure that first flashblock have base - first_block.base.as_ref().ok_or_eyre("Flashblock at index 0 has no base")?; - - // Ensure that index are successive from 0, have same block number and payload id - if !blocks.iter().enumerate().all(|(idx, block)| { - idx == block.index as usize - && block.payload_id == first_block.payload_id - && block.block_number() == first_block.block_number() - }) { - bail!("Flashblock inconsistencies detected in sequence"); - } - - Ok(Self { inner: blocks, execution_outcome }) - } - - /// Returns the block number - pub fn block_number(&self) -> u64 { - self.inner.first().unwrap().block_number() - } - - /// Returns the payload base of the first flashblock. - pub fn payload_base(&self) -> &OpFlashblockPayloadBase { - self.inner.first().unwrap().base.as_ref().unwrap() - } - - /// Returns the payload id shared by all flashblocks in the sequence. - pub fn payload_id(&self) -> PayloadId { - self.inner.first().unwrap().payload_id - } - - /// Returns the number of flashblocks in the sequence. - pub const fn count(&self) -> usize { - self.inner.len() - } - - /// Returns the last flashblock in the sequence. - pub fn last(&self) -> &FlashBlock { - self.inner.last().unwrap() - } - - /// Returns the execution outcome of the sequence. - pub const fn execution_outcome(&self) -> Option { - self.execution_outcome - } - - /// Updates execution outcome of the sequence. 
- pub const fn set_execution_outcome( - &mut self, - execution_outcome: Option, - ) { - self.execution_outcome = execution_outcome; - } - - /// Returns all transactions from all flashblocks in the sequence - pub fn all_transactions(&self) -> Vec { - self.inner.iter().flat_map(|fb| fb.diff.transactions.iter().cloned()).collect() - } -} - -impl Deref for FlashBlockCompleteSequence { - type Target = Vec; - - fn deref(&self) -> &Self::Target { - &self.inner - } -} - -impl TryFrom for FlashBlockCompleteSequence { - type Error = eyre::Error; - fn try_from(sequence: FlashBlockPendingSequence) -> Result { - Self::new(sequence.inner.into_values().collect(), sequence.execution_outcome) - } -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::test_utils::TestFlashBlockFactory; - - mod pending_sequence_insert { - use super::*; - - #[test] - fn test_insert_index_zero_creates_new_sequence() { - let mut sequence = FlashBlockPendingSequence::new(); - let factory = TestFlashBlockFactory::new(); - let fb0 = factory.flashblock_at(0).build(); - let payload_id = fb0.payload_id; - - sequence.insert(fb0); - - assert_eq!(sequence.count(), 1); - assert_eq!(sequence.block_number(), Some(100)); - assert_eq!(sequence.payload_id(), Some(payload_id)); - } - - #[test] - fn test_insert_followup_same_block_and_payload() { - let mut sequence = FlashBlockPendingSequence::new(); - let factory = TestFlashBlockFactory::new(); - - let fb0 = factory.flashblock_at(0).build(); - sequence.insert(fb0.clone()); - - let fb1 = factory.flashblock_after(&fb0).build(); - sequence.insert(fb1.clone()); - - let fb2 = factory.flashblock_after(&fb1).build(); - sequence.insert(fb2); - - assert_eq!(sequence.count(), 3); - assert_eq!(sequence.index(), Some(2)); - } - - #[test] - fn test_insert_ignores_different_block_number() { - let mut sequence = FlashBlockPendingSequence::new(); - let factory = TestFlashBlockFactory::new(); - - let fb0 = factory.flashblock_at(0).build(); - sequence.insert(fb0.clone()); - - // 
Try to insert followup with different block number - let fb1 = factory.flashblock_after(&fb0).block_number(101).build(); - sequence.insert(fb1); - - assert_eq!(sequence.count(), 1); - assert_eq!(sequence.block_number(), Some(100)); - } - - #[test] - fn test_insert_ignores_different_payload_id() { - let mut sequence = FlashBlockPendingSequence::new(); - let factory = TestFlashBlockFactory::new(); - - let fb0 = factory.flashblock_at(0).build(); - let payload_id1 = fb0.payload_id; - sequence.insert(fb0.clone()); - - // Try to insert followup with different payload_id - let payload_id2 = alloy_rpc_types_engine::PayloadId::new([2u8; 8]); - let fb1 = factory.flashblock_after(&fb0).payload_id(payload_id2).build(); - sequence.insert(fb1); - - assert_eq!(sequence.count(), 1); - assert_eq!(sequence.payload_id(), Some(payload_id1)); - } - - #[test] - fn test_insert_maintains_btree_order() { - let mut sequence = FlashBlockPendingSequence::new(); - let factory = TestFlashBlockFactory::new(); - - let fb0 = factory.flashblock_at(0).build(); - sequence.insert(fb0.clone()); - - let fb2 = factory.flashblock_after(&fb0).index(2).build(); - sequence.insert(fb2); - - let fb1 = factory.flashblock_after(&fb0).build(); - sequence.insert(fb1); - - let indices: Vec = sequence.flashblocks().map(|fb| fb.index).collect(); - assert_eq!(indices, vec![0, 1, 2]); - } - } - - mod pending_sequence_finalize { - use super::*; - - #[test] - fn test_finalize_empty_sequence_fails() { - let mut sequence = FlashBlockPendingSequence::new(); - let result = sequence.finalize(); - - assert!(result.is_err()); - assert_eq!( - result.unwrap_err().to_string(), - "Cannot finalize empty flashblock sequence" - ); - } - - #[test] - fn test_finalize_clears_pending_state() { - let mut sequence = FlashBlockPendingSequence::new(); - let factory = TestFlashBlockFactory::new(); - - let fb0 = factory.flashblock_at(0).build(); - sequence.insert(fb0); - - assert_eq!(sequence.count(), 1); - - let _complete = 
sequence.finalize().unwrap(); - - // After finalize, sequence should be empty - assert_eq!(sequence.count(), 0); - assert_eq!(sequence.block_number(), None); - } - - #[test] - fn test_finalize_preserves_execution_outcome() { - let mut sequence = FlashBlockPendingSequence::new(); - let factory = TestFlashBlockFactory::new(); - - let fb0 = factory.flashblock_at(0).build(); - sequence.insert(fb0); - - let outcome = - SequenceExecutionOutcome { block_hash: B256::random(), state_root: B256::random() }; - sequence.set_execution_outcome(Some(outcome)); - - let complete = sequence.finalize().unwrap(); - - assert_eq!(complete.execution_outcome(), Some(outcome)); - } - - #[test] - fn test_finalize_clears_cached_reads() { - let mut sequence = FlashBlockPendingSequence::new(); - let factory = TestFlashBlockFactory::new(); - - let fb0 = factory.flashblock_at(0).build(); - sequence.insert(fb0); - - let cached_reads = CachedReads::default(); - sequence.set_cached_reads(cached_reads); - assert!(sequence.take_cached_reads().is_some()); - - let _complete = sequence.finalize().unwrap(); - - // Cached reads should be cleared - assert!(sequence.take_cached_reads().is_none()); - } - - #[test] - fn test_finalize_multiple_times_after_refill() { - let mut sequence = FlashBlockPendingSequence::new(); - let factory = TestFlashBlockFactory::new(); - - // First sequence - let fb0 = factory.flashblock_at(0).build(); - sequence.insert(fb0); - - let complete1 = sequence.finalize().unwrap(); - assert_eq!(complete1.count(), 1); - - // Add new sequence for next block - let fb1 = factory.flashblock_for_next_block(&complete1.last().clone()).build(); - sequence.insert(fb1); - - let complete2 = sequence.finalize().unwrap(); - assert_eq!(complete2.count(), 1); - assert_eq!(complete2.block_number(), 101); - } - } - - mod complete_sequence_invariants { - use super::*; - - #[test] - fn test_new_empty_sequence_fails() { - let result = FlashBlockCompleteSequence::new(vec![], None); - assert!(result.is_err()); 
- assert_eq!(result.unwrap_err().to_string(), "No flashblocks in sequence"); - } - - #[test] - fn test_new_requires_base_at_index_zero() { - let factory = TestFlashBlockFactory::new(); - // Use builder() with index 1 first to create a flashblock, then change its index to 0 - // to bypass the auto-base creation logic - let mut fb0_no_base = factory.flashblock_at(1).build(); - fb0_no_base.index = 0; - fb0_no_base.base = None; - - let result = FlashBlockCompleteSequence::new(vec![fb0_no_base], None); - assert!(result.is_err()); - assert_eq!(result.unwrap_err().to_string(), "Flashblock at index 0 has no base"); - } - - #[test] - fn test_new_validates_successive_indices() { - let factory = TestFlashBlockFactory::new(); - - let fb0 = factory.flashblock_at(0).build(); - // Skip index 1, go straight to 2 - let fb2 = factory.flashblock_after(&fb0).index(2).build(); - - let result = FlashBlockCompleteSequence::new(vec![fb0, fb2], None); - assert!(result.is_err()); - assert_eq!( - result.unwrap_err().to_string(), - "Flashblock inconsistencies detected in sequence" - ); - } - - #[test] - fn test_new_validates_same_block_number() { - let factory = TestFlashBlockFactory::new(); - - let fb0 = factory.flashblock_at(0).build(); - let fb1 = factory.flashblock_after(&fb0).block_number(101).build(); - - let result = FlashBlockCompleteSequence::new(vec![fb0, fb1], None); - assert!(result.is_err()); - assert_eq!( - result.unwrap_err().to_string(), - "Flashblock inconsistencies detected in sequence" - ); - } - - #[test] - fn test_new_validates_same_payload_id() { - let factory = TestFlashBlockFactory::new(); - - let fb0 = factory.flashblock_at(0).build(); - let payload_id2 = alloy_rpc_types_engine::PayloadId::new([2u8; 8]); - let fb1 = factory.flashblock_after(&fb0).payload_id(payload_id2).build(); - - let result = FlashBlockCompleteSequence::new(vec![fb0, fb1], None); - assert!(result.is_err()); - assert_eq!( - result.unwrap_err().to_string(), - "Flashblock inconsistencies detected in 
sequence" - ); - } - - #[test] - fn test_new_valid_single_flashblock() { - let factory = TestFlashBlockFactory::new(); - let fb0 = factory.flashblock_at(0).build(); - - let result = FlashBlockCompleteSequence::new(vec![fb0], None); - assert!(result.is_ok()); - - let complete = result.unwrap(); - assert_eq!(complete.count(), 1); - assert_eq!(complete.block_number(), 100); - } - - #[test] - fn test_new_valid_multiple_flashblocks() { - let factory = TestFlashBlockFactory::new(); - - let fb0 = factory.flashblock_at(0).build(); - let fb1 = factory.flashblock_after(&fb0).build(); - let fb2 = factory.flashblock_after(&fb1).build(); - - let result = FlashBlockCompleteSequence::new(vec![fb0, fb1, fb2], None); - assert!(result.is_ok()); - - let complete = result.unwrap(); - assert_eq!(complete.count(), 3); - assert_eq!(complete.last().index, 2); - } - - #[test] - fn test_all_transactions_aggregates_correctly() { - let factory = TestFlashBlockFactory::new(); - - let fb0 = factory - .flashblock_at(0) - .transactions(vec![Bytes::from_static(&[1, 2, 3]), Bytes::from_static(&[4, 5, 6])]) - .build(); - - let fb1 = factory - .flashblock_after(&fb0) - .transactions(vec![Bytes::from_static(&[7, 8, 9])]) - .build(); - - let complete = FlashBlockCompleteSequence::new(vec![fb0, fb1], None).unwrap(); - let all_txs = complete.all_transactions(); - - assert_eq!(all_txs.len(), 3); - assert_eq!(all_txs[0], Bytes::from_static(&[1, 2, 3])); - assert_eq!(all_txs[1], Bytes::from_static(&[4, 5, 6])); - assert_eq!(all_txs[2], Bytes::from_static(&[7, 8, 9])); - } - - #[test] - fn test_payload_base_returns_first_block_base() { - let factory = TestFlashBlockFactory::new(); - - let fb0 = factory.flashblock_at(0).build(); - let fb1 = factory.flashblock_after(&fb0).build(); - - let complete = FlashBlockCompleteSequence::new(vec![fb0.clone(), fb1], None).unwrap(); - - assert_eq!(complete.payload_base().block_number, fb0.base.unwrap().block_number); - } - - #[test] - fn test_execution_outcome_mutation() { 
- let factory = TestFlashBlockFactory::new(); - let fb0 = factory.flashblock_at(0).build(); - - let mut complete = FlashBlockCompleteSequence::new(vec![fb0], None).unwrap(); - assert!(complete.execution_outcome().is_none()); - - let outcome = - SequenceExecutionOutcome { block_hash: B256::random(), state_root: B256::random() }; - complete.set_execution_outcome(Some(outcome)); - - assert_eq!(complete.execution_outcome(), Some(outcome)); - } - - #[test] - fn test_deref_provides_vec_access() { - let factory = TestFlashBlockFactory::new(); - - let fb0 = factory.flashblock_at(0).build(); - let fb1 = factory.flashblock_after(&fb0).build(); - - let complete = FlashBlockCompleteSequence::new(vec![fb0, fb1], None).unwrap(); - - // Use deref to access Vec methods - assert_eq!(complete.len(), 2); - assert!(!complete.is_empty()); - } - } - - mod sequence_conversion { - use super::*; - - #[test] - fn test_try_from_pending_to_complete_valid() { - let mut pending = FlashBlockPendingSequence::new(); - let factory = TestFlashBlockFactory::new(); - - let fb0 = factory.flashblock_at(0).build(); - pending.insert(fb0); - - let complete: Result = pending.try_into(); - assert!(complete.is_ok()); - assert_eq!(complete.unwrap().count(), 1); - } - - #[test] - fn test_try_from_pending_to_complete_empty_fails() { - let pending = FlashBlockPendingSequence::new(); - - let complete: Result = pending.try_into(); - assert!(complete.is_err()); - } - - #[test] - fn test_try_from_preserves_execution_outcome() { - let mut pending = FlashBlockPendingSequence::new(); - let factory = TestFlashBlockFactory::new(); - - let fb0 = factory.flashblock_at(0).build(); - pending.insert(fb0); - - let outcome = - SequenceExecutionOutcome { block_hash: B256::random(), state_root: B256::random() }; - pending.set_execution_outcome(Some(outcome)); - - let complete: FlashBlockCompleteSequence = pending.try_into().unwrap(); - assert_eq!(complete.execution_outcome(), Some(outcome)); - } - } - - mod 
pending_sequence_helpers { - use super::*; - - #[test] - fn test_last_flashblock_returns_highest_index() { - let mut sequence = FlashBlockPendingSequence::new(); - let factory = TestFlashBlockFactory::new(); - - let fb0 = factory.flashblock_at(0).build(); - sequence.insert(fb0.clone()); - - let fb1 = factory.flashblock_after(&fb0).build(); - sequence.insert(fb1); - - let last = sequence.last_flashblock().unwrap(); - assert_eq!(last.index, 1); - } - - #[test] - fn test_subscribe_block_sequence_channel() { - let sequence = FlashBlockPendingSequence::new(); - let mut rx = sequence.subscribe_block_sequence(); - - // Spawn a task that sends a complete sequence - let tx = sequence.block_sequence_broadcaster().clone(); - std::thread::spawn(move || { - let factory = TestFlashBlockFactory::new(); - let fb0 = factory.flashblock_at(0).build(); - let complete = FlashBlockCompleteSequence::new(vec![fb0], None).unwrap(); - let _ = tx.send(complete); - }); - - // Should receive the broadcast - let received = rx.blocking_recv(); - assert!(received.is_ok()); - assert_eq!(received.unwrap().count(), 1); - } - } -} diff --git a/crates/flashblocks/src/service.rs b/crates/flashblocks/src/service.rs index 7d6aaee4..ec611f8d 100644 --- a/crates/flashblocks/src/service.rs +++ b/crates/flashblocks/src/service.rs @@ -1,543 +1,309 @@ use crate::{ - cache::{BuildApplyOutcome, BuildTicket, SequenceManager}, - pending_state::PendingStateRegistry, - tx_cache::TransactionCache, - validation::{CanonicalBlockFingerprint, ReconciliationStrategy}, - worker::{BuildResult, FlashBlockBuilder, FlashblockCachedReceipt}, - FlashBlock, FlashBlockCompleteSequence, FlashBlockCompleteSequenceRx, InProgressFlashBlockRx, - PendingFlashBlock, + cache::{raw::RawFlashblocksCache, FlashblockStateCache}, + execution::{FlashblockReceipt, FlashblockSequenceValidator, OverlayProviderFactory}, + persist::{handle_persistence, handle_relay_flashblocks}, + state::{handle_canonical_stream, handle_execution_tasks, 
handle_incoming_flashblocks}, + ReceivedFlashblocksRx, }; -use alloy_primitives::B256; -use futures_util::{FutureExt, Stream, StreamExt}; -use metrics::{Counter, Gauge, Histogram}; -use op_alloy_rpc_types_engine::OpFlashblockPayloadBase; -use reth_evm::ConfigureEvm; -use reth_metrics::Metrics; -use reth_primitives_traits::{AlloyBlockHeader, BlockTy, HeaderTy, NodePrimitives, ReceiptTy}; -use reth_storage_api::{BlockReaderIdExt, StateProviderFactory}; -use reth_tasks::TaskExecutor; +use futures_util::Stream; use std::{ - sync::Arc, - time::{Duration, Instant}, -}; -use tokio::{ - sync::{mpsc, oneshot, watch}, - time::sleep, + collections::BTreeSet, + net::SocketAddr, + sync::{Arc, Condvar, Mutex, OnceLock}, }; +use tokio::sync::broadcast::Sender; use tracing::*; -const CONNECTION_BACKOUT_PERIOD: Duration = Duration::from_secs(5); +use alloy_eips::eip2718::Encodable2718; +use op_alloy_rpc_types_engine::OpFlashblockPayloadBase; -/// Default maximum depth for pending blocks ahead of canonical. -const DEFAULT_MAX_DEPTH: u64 = 64; +use reth_chain_state::CanonStateNotificationStream; +use reth_engine_primitives::TreeConfig; +use reth_evm::ConfigureEvm; +use reth_node_core::dirs::{ChainPath, DataDirPath}; +use reth_optimism_forks::OpHardforks; +use reth_primitives_traits::NodePrimitives; +use reth_provider::{ + BlockReader, HashedPostStateProvider, HeaderProvider, StateProviderFactory, StateReader, +}; +use reth_tasks::TaskExecutor; + +use xlayer_builder::{ + args::FlashblocksArgs, + broadcast::{WebSocketPublisher, XLayerFlashblockPayload}, + flashblocks::PayloadEventsSender, + metrics::{tokio::FlashblocksTaskMetrics, BuilderMetrics}, +}; -/// Capacity for the canonical block notification channel. -/// This bounds memory usage while allowing for some buffering during catch-up. -const CANONICAL_BLOCK_CHANNEL_CAPACITY: usize = 128; +pub const EXECUTION_TASK_QUEUE_CAPACITY: usize = 5; -/// Notification about a new canonical block for reconciliation. 
-#[derive(Debug, Clone)] -pub struct CanonicalBlockNotification { - /// The canonical block number. - pub block_number: u64, - /// Canonical block hash. - pub block_hash: B256, - /// Canonical parent hash. - pub parent_hash: B256, - /// Transaction hashes in the canonical block. - pub tx_hashes: Vec, +pub type ExecutionTaskQueue = Arc<(Mutex>, Condvar)>; + +/// Extension trait for [`ExecutionTaskQueue`] providing a flush operation. +pub trait ExecutionTaskQueueFlush { + /// Clears all pending execution tasks from the queue. + /// + /// Called when a flush is detected on the flashblocks state layer (reorg or stale + /// pending) to drain any queued block heights that were built against now-invalidated + /// state. The execution worker will re-enter its wait loop and pick up fresh tasks + /// from incoming flashblocks after this call. + fn flush(&self); } -/// The `FlashBlockService` maintains an in-memory [`PendingFlashBlock`] built out of a sequence of -/// [`FlashBlock`]s. -#[derive(Debug)] -pub struct FlashBlockService< - N: NodePrimitives, - S, - EvmConfig: ConfigureEvm + Unpin>, - Provider, -> { - /// Incoming flashblock stream. - incoming_flashblock_rx: S, - /// Receiver for canonical block notifications (bounded to prevent OOM). - canonical_block_rx: Option>, - /// Signals when a block build is in progress. - in_progress_tx: watch::Sender>, - /// Broadcast channel to forward received flashblocks from the subscription. - received_flashblocks_tx: tokio::sync::broadcast::Sender>, +impl ExecutionTaskQueueFlush for ExecutionTaskQueue { + fn flush(&self) { + match self.0.lock() { + Ok(mut queue) => { + let flushed = queue.len(); + queue.clear(); + if flushed > 0 { + warn!( + target: "flashblocks", + flushed, + "Execution task queue flushed on state reset" + ); + } + } + Err(err) => { + warn!( + target: "flashblocks", + %err, + "Failed to flush execution task queue: mutex poisoned" + ); + } + } + } +} - /// Executes flashblock sequences to build pending blocks. 
- builder: FlashBlockBuilder, - /// Task executor for spawning block build jobs. - spawner: TaskExecutor, - /// Currently running block build job with start time and result receiver. - job: Option>, - /// Manages flashblock sequences with caching and intelligent build selection. - sequences: SequenceManager, - /// Registry for pending block states to enable speculative building. - pending_states: PendingStateRegistry, - /// Transaction execution cache for incremental flashblock building. - tx_cache: TransactionCache, +/// Context for flashblocks RPC state handles. +pub struct FlashblocksRpcCtx { + /// Canonical chainstate provider. + pub provider: Provider, + /// Canonical state notification stream. + pub canon_state_rx: CanonStateNotificationStream, + /// Evm config for the sequence validator. + pub evm_config: EvmConfig, + /// Chain specs for the sequence validator. + pub chain_spec: Arc, + /// Node engine tree configuration for the sequence validator. + pub tree_config: TreeConfig, + /// Flashblocks RPC debug mode to enable state comparison. + pub debug_state_comparison: bool, +} - /// Epoch counter for state invalidation. - /// - /// Incremented whenever speculative state is cleared (reorg, catch-up, depth limit). - /// Used to detect and discard stale build results from in-flight jobs that were - /// started before the state was invalidated. - state_epoch: u64, +/// Context for handling flashblocks persistence and relaying. +pub struct FlashblocksPersistCtx { + /// Data directory for flashblocks persistence. + pub datadir: ChainPath, + /// Whether to relay flashblocks to the subscribers. + pub relay_flashblocks: bool, +} - /// Maximum depth for pending blocks ahead of canonical before clearing. - max_depth: u64, - /// `FlashBlock` service's metrics - metrics: FlashBlockServiceMetrics, +pub struct FlashblocksRpcService +where + N: NodePrimitives, + EvmConfig: ConfigureEvm, + ChainSpec: OpHardforks, +{ + /// Flashblock configurations. 
+ args: FlashblocksArgs, + /// Flashblocks state cache (shared with RPC handlers). + flashblocks_state: FlashblockStateCache, + /// Flashblocks RPC context. + rpc_ctx: FlashblocksRpcCtx, + /// Flashblocks persist context. + persist_ctx: FlashblocksPersistCtx, + /// Task executor. + task_executor: TaskExecutor, + /// Broadcast channel to forward received flashblocks from the subscription. + received_flashblocks_tx: Sender>, } -impl FlashBlockService +impl FlashblocksRpcService where N: NodePrimitives, - N::Receipt: FlashblockCachedReceipt, - S: Stream> + Unpin + 'static, + N::Receipt: FlashblockReceipt, + N::SignedTx: Encodable2718, + N::Block: From>, EvmConfig: ConfigureEvm + Unpin> - + Clone + + Send + 'static, Provider: StateProviderFactory - + BlockReaderIdExt< - Header = HeaderTy, - Block = BlockTy, - Transaction = N::SignedTx, - Receipt = ReceiptTy, - > + Unpin + + HeaderProvider
::BlockHeader> + + OverlayProviderFactory + + BlockReader + + StateReader + + HashedPostStateProvider + + Unpin + Clone + + Send + 'static, + ChainSpec: OpHardforks + Send + Sync + 'static, { - /// Constructs a new `FlashBlockService` that receives [`FlashBlock`]s from `rx` stream. pub fn new( - incoming_flashblock_rx: S, - evm_config: EvmConfig, - provider: Provider, - spawner: TaskExecutor, - compute_state_root: bool, - ) -> Self { - let (in_progress_tx, _) = watch::channel(None); + args: FlashblocksArgs, + flashblocks_state: FlashblockStateCache, + task_executor: TaskExecutor, + rpc_ctx: FlashblocksRpcCtx, + persist_ctx: FlashblocksPersistCtx, + ) -> eyre::Result { let (received_flashblocks_tx, _) = tokio::sync::broadcast::channel(128); - Self { - incoming_flashblock_rx, - canonical_block_rx: None, - in_progress_tx, + Ok(Self { + args, + flashblocks_state, + rpc_ctx, + persist_ctx, + task_executor, received_flashblocks_tx, - builder: FlashBlockBuilder::new(evm_config, provider), - spawner, - job: None, - sequences: SequenceManager::new(compute_state_root), - pending_states: PendingStateRegistry::new(), - tx_cache: TransactionCache::new(), - state_epoch: 0, - max_depth: DEFAULT_MAX_DEPTH, - metrics: FlashBlockServiceMetrics::default(), - } + }) } - /// Sets the canonical block receiver for reconciliation. - /// - /// When canonical blocks are received, the service will reconcile the pending - /// flashblock state to handle catch-up and reorg scenarios. - /// - /// The channel should be bounded to prevent unbounded memory growth. Use - /// [`create_canonical_block_channel`] to create a properly sized channel. - pub fn with_canonical_block_rx( - mut self, - rx: mpsc::Receiver, - ) -> Self { - self.canonical_block_rx = Some(rx); - self + /// Returns a new subscription to received flashblocks. 
+ pub fn subscribe_received_flashblocks(&self) -> ReceivedFlashblocksRx { + self.received_flashblocks_tx.subscribe() } - /// Sets the maximum depth for pending blocks ahead of canonical. - /// - /// If pending blocks get too far ahead of the canonical chain, the pending - /// state will be cleared to prevent unbounded memory growth. - pub const fn with_max_depth(mut self, max_depth: u64) -> Self { - self.max_depth = max_depth; - self - } - - /// Returns the sender half for the received flashblocks broadcast channel. - pub const fn flashblocks_broadcaster( - &self, - ) -> &tokio::sync::broadcast::Sender> { - &self.received_flashblocks_tx - } - - /// Returns the sender half for the flashblock sequence broadcast channel. - pub const fn block_sequence_broadcaster( - &self, - ) -> &tokio::sync::broadcast::Sender { - self.sequences.block_sequence_broadcaster() + pub fn spawn_persistence(&self) -> eyre::Result<()> { + // Spawn persistence handle + debug!(target: "flashblocks", "Initializing flashblocks persistence"); + let datadir = self.persist_ctx.datadir.clone(); + let rx = self.subscribe_received_flashblocks(); + self.task_executor.spawn_critical_task( + "xlayer-flashblocks-persistence", + Box::pin(async move { + handle_persistence(rx, datadir).await; + }), + ); + // Spawn relayer handle + if self.persist_ctx.relay_flashblocks { + let ws_addr = + SocketAddr::new(self.args.flashblocks_addr.parse()?, self.args.flashblocks_port); + let metrics = Arc::new(BuilderMetrics::default()); + let task_metrics = Arc::new(FlashblocksTaskMetrics::new()); + let ws_pub = Arc::new( + WebSocketPublisher::new( + ws_addr, + metrics, + &task_metrics.websocket_publisher, + self.args.ws_subscriber_limit, + ) + .map_err(|e| eyre::eyre!("Failed to create WebSocket publisher: {e}"))?, + ); + info!(target: "flashblocks", "WebSocket publisher initialized at {ws_addr}"); + + let rx = self.subscribe_received_flashblocks(); + self.task_executor.spawn_critical_task( + "xlayer-flashblocks-publish", + 
Box::pin(async move { + handle_relay_flashblocks(rx, ws_pub).await; + }), + ); + } + Ok(()) } - /// Returns a subscriber to the flashblock sequence. - pub fn subscribe_block_sequence(&self) -> FlashBlockCompleteSequenceRx { - self.sequences.subscribe_block_sequence() - } + pub fn spawn_rpc(self, incoming_rx: S) + where + S: Stream> + Unpin + Send + 'static, + { + debug!(target: "flashblocks", "Initializing flashblocks rpc"); + let raw_cache = Arc::new(RawFlashblocksCache::new()); + let validator = FlashblockSequenceValidator::new( + self.rpc_ctx.evm_config, + self.rpc_ctx.provider, + self.rpc_ctx.chain_spec, + self.flashblocks_state.clone(), + self.task_executor.clone(), + self.rpc_ctx.tree_config, + ); + let task_queue = Arc::new((Mutex::new(BTreeSet::new()), Condvar::new())); + + // Spawn incoming raw flashblocks handle. + let received_tx = self.received_flashblocks_tx.clone(); + self.task_executor.spawn_critical_task( + "xlayer-flashblocks-payload", + Box::pin(handle_incoming_flashblocks::( + incoming_rx, + received_tx, + raw_cache.clone(), + task_queue.clone(), + )), + ); + + // Spawn the flashblocks sequence execution task on a dedicated OS thread. + let cache = raw_cache.clone(); + let queue = task_queue.clone(); + reth_tasks::spawn_os_thread("xlayer-flashblocks-execution", move || { + handle_execution_tasks::(validator, cache, queue); + }); - /// Returns a receiver that signals when a flashblock is being built. - pub fn subscribe_in_progress(&self) -> InProgressFlashBlockRx { - self.in_progress_tx.subscribe() + // Spawn the canonical stream handle. + self.task_executor.spawn_critical_task( + "xlayer-flashblocks-canonical", + Box::pin(handle_canonical_stream::( + self.rpc_ctx.canon_state_rx, + self.flashblocks_state, + raw_cache, + task_queue, + self.rpc_ctx.debug_state_comparison, + )), + ); } - /// Drives the service and sends new blocks to the receiver. - /// - /// This loop: - /// 1. Checks if any build job has completed and processes results - /// 2. 
Receives and batches all immediately available flashblocks - /// 3. Processes canonical block notifications for reconciliation - /// 4. Attempts to build a block from the complete sequence - /// - /// Note: this should be spawned - pub async fn run(mut self, tx: watch::Sender>>) { - loop { - tokio::select! { - // Event 1: job exists, listen to job results - // Handle both successful results and channel errors (e.g., task panic) - job_result = async { - match self.job.as_mut() { - Some(job) => Some((&mut job.result_rx).await), - None => std::future::pending().await, - } - } => { - let job = self.job.take().unwrap(); - let _ = self.in_progress_tx.send(None); - - // Handle channel error (task panicked or was cancelled) - let Some(Ok((result, returned_cache))) = job_result else { - warn!( - target: "flashblocks", - "Build job channel closed unexpectedly (task may have panicked)" + pub fn spawn_prewarm(&self, events_sender: Arc>) + where + N: NodePrimitives< + Block = ::Block, + Receipt = ::Receipt, + >, + { + let mut pending_rx = self.flashblocks_state.subscribe_pending_sequence(); + if let Some(payload_events_sender) = events_sender.get().cloned() { + self.task_executor.spawn_critical_task( + "xlayer-flashblocks-prewarm", + Box::pin(async move { + use either::Either; + use reth_optimism_payload_builder::OpBuiltPayload; + use reth_optimism_primitives::OpPrimitives; + use reth_payload_builder_primitives::Events; + + while pending_rx.changed().await.is_ok() { + let Some(pending_sequence) = pending_rx + .borrow_and_update() + .clone() + .filter(|s| s.is_target_flashblock()) + else { + continue; + }; + let executed = &pending_sequence.executed_block; + let block = executed.recovered_block.clone_sealed_block(); + let trie_data = executed.trie_data(); + // Use accumulated trie_updates from all incremental sequence executions. 
+ let accumulated_trie_updates = Arc::new( + pending_sequence + .prefix_execution_meta + .accumulated_trie_updates + .clone() + .into_sorted(), ); - // Re-initialize transaction cache since we lost the one sent to the task - self.tx_cache = TransactionCache::new(); - self.schedule_followup_build(); - continue; - }; - - // Check if the state epoch has changed since this job started. - // If so, the speculative state has been invalidated (e.g., by a reorg) - // and we should discard the build result AND the returned cache to avoid - // reintroducing stale state that was cleared during reconciliation. - if job.epoch != self.state_epoch { - trace!( - target: "flashblocks", - job_epoch = job.epoch, - current_epoch = self.state_epoch, - "Discarding stale build result and cache (state was invalidated)" + let built = + reth_payload_primitives::BuiltPayloadExecutedBlock:: { + recovered_block: executed.recovered_block.clone(), + execution_output: executed.execution_output.clone(), + hashed_state: Either::Right(trie_data.hashed_state), + trie_updates: Either::Right(accumulated_trie_updates), + }; + // Use default zero id — to avoid accumulating stale entries in the engine state tree. 
+ let payload = OpBuiltPayload::::new( + reth_payload_builder::PayloadId::default(), + Arc::new(block), + alloy_primitives::U256::ZERO, + Some(built), ); - self.metrics.stale_builds_discarded.increment(1); - // Don't restore the returned cache - keep the cleared cache from reconciliation - self.schedule_followup_build(); - continue; - } - - // Restore the transaction cache from the spawned task (only if epoch matched) - self.tx_cache = returned_cache; - - match result { - Ok(Some(build_result)) => { - let pending = build_result.pending_flashblock; - let apply_outcome = self.sequences - .on_build_complete(job.ticket, Some((pending.clone(), build_result.cached_reads))); - - if apply_outcome.is_applied() { - // Record pending state for speculative building of subsequent blocks - self.pending_states.record_build(build_result.pending_state); - - let elapsed = job.start_time.elapsed(); - self.metrics.execution_duration.record(elapsed.as_secs_f64()); - - let _ = tx.send(Some(pending)); - } else { - match apply_outcome { - BuildApplyOutcome::RejectedPendingSequenceMismatch { .. } => { - self.metrics - .build_reject_pending_sequence_mismatch - .increment(1); - } - BuildApplyOutcome::RejectedPendingRevisionStale { .. } => { - self.metrics - .build_reject_pending_revision_stale - .increment(1); - } - BuildApplyOutcome::RejectedCachedSequenceMissing { .. } => { - self.metrics - .build_reject_cached_sequence_missing - .increment(1); - } - BuildApplyOutcome::SkippedNoBuildResult => { - self.metrics - .build_reject_missing_build_result - .increment(1); - } - BuildApplyOutcome::AppliedPending - | BuildApplyOutcome::AppliedCached { .. 
} => {} - } - trace!( - target: "flashblocks", - ?apply_outcome, - "Discarding build side effects due to rejected completion apply" - ); - } - } - Ok(None) => { - trace!(target: "flashblocks", "Build job returned None"); - } - Err(err) => { - warn!(target: "flashblocks", %err, "Build job failed"); - } - } - - // Drain runnable work after each completion instead of waiting for another - // external event. - self.schedule_followup_build(); - } - - // Event 2: New flashblock arrives (batch process all ready flashblocks) - result = self.incoming_flashblock_rx.next() => { - match result { - Some(Ok(flashblock)) => { - // Process first flashblock - self.process_flashblock(flashblock); - - // Batch process all other immediately available flashblocks - while let Some(result) = self.incoming_flashblock_rx.next().now_or_never().flatten() { - match result { - Ok(fb) => self.process_flashblock(fb), - Err(err) => warn!(target: "flashblocks", %err, "Error receiving flashblock"), - } - } - - self.try_start_build_job(); - } - Some(Err(err)) => { - warn!( - target: "flashblocks", - %err, - retry_period = CONNECTION_BACKOUT_PERIOD.as_secs(), - "Error receiving flashblock" - ); - sleep(CONNECTION_BACKOUT_PERIOD).await; - } - None => { - warn!(target: "flashblocks", "Flashblock stream ended"); - break; - } + let _ = payload_events_sender.send(Events::BuiltPayload(payload)); } - } - - // Event 3: Canonical block notification for reconciliation - Some(notification) = async { - match self.canonical_block_rx.as_mut() { - Some(rx) => rx.recv().await, - None => std::future::pending().await, - } - } => { - self.process_canonical_block(notification); - // Try to build after reconciliation in case we can now build - self.try_start_build_job(); - } - } - } - } - - /// Attempts to start the next build after a completion and records outcome metrics. 
- fn schedule_followup_build(&mut self) { - self.metrics.drain_followup_attempts.increment(1); - if self.try_start_build_job() { - self.metrics.drain_followup_started.increment(1); - } else { - self.metrics.drain_followup_noop.increment(1); - } - } - - /// Processes a canonical block notification and reconciles pending state. - fn process_canonical_block(&mut self, notification: CanonicalBlockNotification) { - let canonical_fingerprint = CanonicalBlockFingerprint { - block_number: notification.block_number, - block_hash: notification.block_hash, - parent_hash: notification.parent_hash, - tx_hashes: notification.tx_hashes, - }; - - let strategy = - self.sequences.process_canonical_block(canonical_fingerprint, self.max_depth); - - // Record metrics based on strategy - if matches!(strategy, ReconciliationStrategy::HandleReorg) { - self.metrics.reorg_count.increment(1); - } - - // Clear pending states and transaction cache for strategies that invalidate speculative - // state. Also increment the state epoch to invalidate any in-flight build jobs. - if matches!( - strategy, - ReconciliationStrategy::HandleReorg - | ReconciliationStrategy::CatchUp - | ReconciliationStrategy::DepthLimitExceeded { .. } - ) { - self.pending_states.clear(); - self.tx_cache.clear(); - self.state_epoch = self.state_epoch.wrapping_add(1); - trace!( - target: "flashblocks", - new_epoch = self.state_epoch, - ?strategy, - "State invalidated, incremented epoch" + }), ); } } - - /// Processes a single flashblock: notifies subscribers, records metrics, and inserts into - /// sequence. 
- fn process_flashblock(&mut self, flashblock: FlashBlock) { - self.notify_received_flashblock(&flashblock); - - if flashblock.index == 0 { - self.metrics.last_flashblock_length.record(self.sequences.pending().count() as f64); - } - - if let Err(err) = self.sequences.insert_flashblock(flashblock) { - trace!(target: "flashblocks", %err, "Failed to insert flashblock"); - } - } - - /// Notifies all subscribers about the received flashblock. - fn notify_received_flashblock(&self, flashblock: &FlashBlock) { - if self.received_flashblocks_tx.receiver_count() > 0 { - let _ = self.received_flashblocks_tx.send(Arc::new(flashblock.clone())); - } - } - - /// Attempts to build a block if no job is currently running and a buildable sequence exists. - fn try_start_build_job(&mut self) -> bool { - if self.job.is_some() { - return false; // Already building - } - - let Some(latest) = self.builder.provider().latest_header().ok().flatten() else { - return false; - }; - - // Prefer parent-hash-specific speculative context for the current pending sequence. - // Fall back to the latest speculative state when no exact parent match is found. 
- let pending_parent = self - .sequences - .pending() - .payload_base() - .and_then(|base| self.pending_states.get_state_for_parent(base.parent_hash).cloned()) - .or_else(|| self.pending_states.current().cloned()); - - let Some(candidate) = - self.sequences.next_buildable_args(latest.hash(), latest.timestamp(), pending_parent) - else { - return false; // Nothing buildable - }; - let ticket = candidate.ticket; - let args = candidate.args; - - // Spawn build job - let fb_info = FlashBlockBuildInfo { - parent_hash: args.base.parent_hash, - index: args.last_flashblock_index, - block_number: args.base.block_number, - }; - self.metrics.current_block_height.set(fb_info.block_number as f64); - self.metrics.current_index.set(fb_info.index as f64); - let _ = self.in_progress_tx.send(Some(fb_info)); - - // Take ownership of the transaction cache for the spawned task - let mut tx_cache = std::mem::take(&mut self.tx_cache); - - let (result_tx, result_rx) = oneshot::channel(); - let builder = self.builder.clone(); - self.spawner.spawn_blocking(move || { - let result = builder.execute(args, Some(&mut tx_cache)); - let _ = result_tx.send((result, tx_cache)); - }); - self.job = Some(BuildJob { - start_time: Instant::now(), - epoch: self.state_epoch, - ticket, - result_rx, - }); - true - } -} - -/// Information for a flashblock currently built -#[derive(Debug, Clone, Copy)] -pub struct FlashBlockBuildInfo { - /// Parent block hash - pub parent_hash: B256, - /// Flashblock index within the current block's sequence - pub index: u64, - /// Block number of the flashblock being built. - pub block_number: u64, -} - -/// A running build job with metadata for tracking and invalidation. -#[derive(Debug)] -struct BuildJob { - /// When the job was started. - start_time: Instant, - /// The state epoch when this job was started. 
- /// - /// If the service's `state_epoch` has changed by the time this job completes, - /// the result should be discarded as the speculative state has been invalidated. - epoch: u64, - /// Opaque ticket identifying the exact sequence snapshot targeted by this build job. - ticket: BuildTicket, - /// Receiver for the build result and returned transaction cache. - #[allow(clippy::type_complexity)] - result_rx: oneshot::Receiver<(eyre::Result>>, TransactionCache)>, -} - -/// Creates a bounded channel for canonical block notifications. -/// -/// This returns a sender/receiver pair with a bounded capacity to prevent -/// unbounded memory growth. If the receiver falls behind, senders will -/// block until space is available. -/// -/// Returns `(sender, receiver)` tuple for use with [`FlashBlockService::with_canonical_block_rx`]. -pub fn create_canonical_block_channel( -) -> (mpsc::Sender, mpsc::Receiver) { - mpsc::channel(CANONICAL_BLOCK_CHANNEL_CAPACITY) -} - -#[derive(Metrics)] -#[metrics(scope = "flashblock_service")] -struct FlashBlockServiceMetrics { - /// The last complete length of flashblocks per block. - last_flashblock_length: Histogram, - /// The duration applying flashblock state changes in seconds. - execution_duration: Histogram, - /// Current block height. - current_block_height: Gauge, - /// Current flashblock index. - current_index: Gauge, - /// Number of reorgs detected during canonical block reconciliation. - reorg_count: Counter, - /// Number of build results discarded due to state invalidation (reorg during build). - stale_builds_discarded: Counter, - /// Number of completions rejected because pending sequence identity no longer matched. - build_reject_pending_sequence_mismatch: Counter, - /// Number of completions rejected because pending revision no longer matched. - build_reject_pending_revision_stale: Counter, - /// Number of completions rejected because referenced cached sequence was missing. 
- build_reject_cached_sequence_missing: Counter, - /// Number of completions skipped due to missing build result payload. - build_reject_missing_build_result: Counter, - /// Number of follow-up drain scheduling attempts after build completion. - drain_followup_attempts: Counter, - /// Number of follow-up attempts that successfully started another build. - drain_followup_started: Counter, - /// Number of follow-up attempts where no buildable work was available. - drain_followup_noop: Counter, } diff --git a/crates/flashblocks/src/state.rs b/crates/flashblocks/src/state.rs new file mode 100644 index 00000000..96dd306b --- /dev/null +++ b/crates/flashblocks/src/state.rs @@ -0,0 +1,273 @@ +use crate::{ + cache::RawFlashblocksCache, + debug::debug_compare_flashblocks_bundle_states, + execution::validator::FlashblockSequenceValidator, + execution::{FlashblockReceipt, OverlayProviderFactory}, + service::{ExecutionTaskQueue, ExecutionTaskQueueFlush, EXECUTION_TASK_QUEUE_CAPACITY}, + FlashblockStateCache, +}; +use futures_util::{FutureExt, Stream, StreamExt}; +use std::{sync::Arc, time::Duration}; +use tokio::{sync::broadcast::Sender, time::sleep}; + +use tracing::*; + +use alloy_consensus::BlockHeader; +use alloy_eips::eip2718::Encodable2718; +use op_alloy_rpc_types_engine::OpFlashblockPayloadBase; + +use reth_chain_state::CanonStateNotificationStream; +use reth_evm::ConfigureEvm; +use reth_optimism_forks::OpHardforks; +use reth_primitives_traits::NodePrimitives; +use reth_provider::{ + BlockReader, HashedPostStateProvider, HeaderProvider, StateProviderFactory, StateReader, +}; + +use xlayer_builder::broadcast::XLayerFlashblockPayload; + +const CONNECTION_BACKOUT_PERIOD: Duration = Duration::from_secs(5); + +pub async fn handle_incoming_flashblocks( + mut incoming_rx: S, + received_tx: Sender>, + raw_cache: Arc>, + task_queue: ExecutionTaskQueue, +) where + S: Stream> + Unpin + Send + 'static, + N: NodePrimitives, +{ + info!(target: "flashblocks", "Flashblocks raw handle 
started"); + loop { + match incoming_rx.next().await { + Some(Ok(payload)) => { + if let Err(err) = + process_flashblock_payload::(payload, &received_tx, &raw_cache, &task_queue) + { + warn!( + target: "flashblocks", + %err, + "Error receiving flashblock payload" + ); + continue; + }; + + // Batch process all other immediately available flashblocks + while let Some(result) = incoming_rx.next().now_or_never().flatten() { + match result { + Ok(payload) => { + if let Err(err) = process_flashblock_payload::( + payload, + &received_tx, + &raw_cache, + &task_queue, + ) { + warn!( + target: "flashblocks", + %err, + "Error receiving flashblock payload" + ); + continue; + }; + } + Err(err) => { + warn!(target: "flashblocks", %err, "Error receiving flashblock"); + continue; + } + } + } + // Schedule executor + task_queue.1.notify_one(); + } + Some(Err(err)) => { + warn!( + target: "flashblocks:handle", + %err, + retry_period = CONNECTION_BACKOUT_PERIOD.as_secs(), + "Error receiving flashblock" + ); + sleep(CONNECTION_BACKOUT_PERIOD).await; + } + None => { + break; + } + } + } + warn!(target: "flashblocks:handle", "Flashblock payload handle ended"); +} + +fn process_flashblock_payload( + payload: XLayerFlashblockPayload, + received_tx: &tokio::sync::broadcast::Sender>, + raw_cache: &RawFlashblocksCache, + task_queue: &ExecutionTaskQueue, +) -> eyre::Result<()> { + if received_tx.receiver_count() > 0 { + let _ = received_tx.send(Arc::new(payload.clone())); + } + // Insert into raw cache + let height = payload.inner.block_number(); + raw_cache.handle_flashblock(payload)?; + + // Enqueue to execution tasks + let mut queue = + task_queue.0.lock().map_err(|e| eyre::eyre!("Task queue lock poisoned: {e}"))?; + if !queue.contains(&height) && queue.len() >= EXECUTION_TASK_QUEUE_CAPACITY { + // Queue is full — evict the lowest block height before inserting. 
+ let evicted = queue.pop_first(); + warn!( + target: "flashblocks", + ?evicted, + new_height = height, + "Execution task queue full, evicting lowest height" + ); + } + queue.insert(height); + Ok(()) +} + +pub fn handle_execution_tasks( + mut validator: FlashblockSequenceValidator, + raw_cache: Arc>, + task_queue: ExecutionTaskQueue, +) where + N: NodePrimitives, + N::Receipt: FlashblockReceipt, + N::SignedTx: Encodable2718, + N::Block: From>, + EvmConfig: ConfigureEvm + Unpin> + + Send + + 'static, + Provider: StateProviderFactory + + HeaderProvider
::BlockHeader> + + OverlayProviderFactory + + BlockReader + + StateReader + + HashedPostStateProvider + + Unpin + + Clone + + Send + + 'static, + ChainSpec: OpHardforks + Send + Sync + 'static, +{ + info!(target: "flashblocks", "Flashblocks execution handle started"); + let (queue_mutex, condvar) = &*task_queue; + loop { + let execute_height = { + let guard = match queue_mutex.lock() { + Ok(g) => g, + Err(err) => { + warn!(target: "flashblocks", %err, "Task queue mutex poisoned, retrying"); + continue; + } + }; + let mut queue = match condvar.wait_while(guard, |q| q.is_empty()) { + Ok(g) => g, + Err(err) => { + warn!(target: "flashblocks", %err, "Task queue condvar wait poisoned, retrying"); + continue; + } + }; + queue.pop_first().unwrap() + }; + + // Extract buildable sequence for this height from raw cache + let Some(args) = raw_cache.try_get_buildable_args(execute_height) else { + trace!( + target: "flashblocks", + execute_height, + "No buildable args for execution task height, skipping" + ); + continue; + }; + debug!( + target: "flashblocks", + execute_height, + last_index = args.last_flashblock_index, + target_index = args.target_index, + "Executing flashblocks sequence" + ); + + if let Err(err) = validator.execute_sequence(args) { + warn!( + target: "flashblocks", + %err, + execute_height, + "Validator failed to execute flashblocks sequence" + ); + } + } +} + +pub async fn handle_canonical_stream( + mut canon_rx: CanonStateNotificationStream, + flashblocks_state: FlashblockStateCache, + raw_cache: Arc>, + task_queue: ExecutionTaskQueue, + debug_state_comparison: bool, +) { + let mut trie_updates = None; + let mut pending_rx = if debug_state_comparison { + Some(flashblocks_state.subscribe_pending_sequence()) + } else { + None + }; + + info!(target: "flashblocks", "Canonical state handler started"); + loop { + // Use select! to race canonical notifications with pending sequence updates. 
+ // Pending sequence updates are only processed in debug mode to capture + // accumulated trie_updates before the block is promoted to confirm. + let notification = if let Some(ref mut rx) = pending_rx { + tokio::select! { + result = canon_rx.next() => { + match result { + Some(notification) => notification, + None => break, + } + }, + Ok(()) = rx.changed() => { + if let Some(seq) = rx.borrow_and_update().as_ref() + .filter(|s| s.is_target_flashblock()) + { + trie_updates = Some((seq.get_height(), seq.prefix_execution_meta.accumulated_trie_updates.clone().into_sorted())); + } + continue; + } + } + } else { + match canon_rx.next().await { + Some(n) => n, + None => break, + } + }; + + let tip = notification.tip(); + let block_hash = tip.hash(); + let block_number = tip.number(); + let is_reorg = notification.reverted().is_some(); + + if debug_state_comparison { + debug_compare_flashblocks_bundle_states( + &flashblocks_state, + block_number, + block_hash, + trie_updates.take().filter(|t| t.0 == block_number).map(|t| t.1), + ); + } + + raw_cache.handle_canonical_height(block_number); + if flashblocks_state.handle_canonical_block((block_number, block_hash), is_reorg) { + task_queue.flush(); + } + + debug!( + target: "flashblocks", + block_number, + ?block_hash, + is_reorg, + "Canonical block processed" + ); + } + warn!(target: "flashblocks", "Canonical state stream ended"); +} diff --git a/crates/flashblocks/src/subscription/mod.rs b/crates/flashblocks/src/subscription/mod.rs new file mode 100644 index 00000000..b54b4a31 --- /dev/null +++ b/crates/flashblocks/src/subscription/mod.rs @@ -0,0 +1,4 @@ +pub mod pubsub; +mod rpc; + +pub use rpc::FlashblocksPubSub; diff --git a/crates/flashblocks/src/pubsub.rs b/crates/flashblocks/src/subscription/pubsub.rs similarity index 99% rename from crates/flashblocks/src/pubsub.rs rename to crates/flashblocks/src/subscription/pubsub.rs index 5f16c47f..d82c3c92 100644 --- a/crates/flashblocks/src/pubsub.rs +++ 
b/crates/flashblocks/src/subscription/pubsub.rs @@ -1,12 +1,14 @@ +use jsonrpsee::types::ErrorObject; +use serde::{Deserialize, Serialize}; +use std::collections::HashSet; + use alloy_primitives::{Address, TxHash}; use alloy_rpc_types_eth::{ pubsub::{Params as AlloyParams, SubscriptionKind as AlloySubscriptionKind}, Header, }; -use jsonrpsee::types::ErrorObject; + use reth_rpc_server_types::result::invalid_params_rpc_err; -use serde::{Deserialize, Serialize}; -use std::collections::HashSet; const FLASHBLOCKS: &str = "flashblocks"; diff --git a/crates/flashblocks/src/subscription.rs b/crates/flashblocks/src/subscription/rpc.rs similarity index 97% rename from crates/flashblocks/src/subscription.rs rename to crates/flashblocks/src/subscription/rpc.rs index bc8ce2fb..53e1955e 100644 --- a/crates/flashblocks/src/subscription.rs +++ b/crates/flashblocks/src/subscription/rpc.rs @@ -1,11 +1,11 @@ -use crate::pubsub::{ - EnrichedTransaction, FlashblockParams, FlashblockStreamEvent, FlashblockSubscriptionKind, - FlashblocksFilter, +use crate::{ + subscription::pubsub::{ + EnrichedTransaction, FlashblockParams, FlashblockStreamEvent, FlashblockSubscriptionKind, + FlashblocksFilter, + }, + PendingSequence, PendingSequenceRx, }; -use alloy_consensus::{transaction::TxHashRef, BlockHeader as _, Transaction as _, TxReceipt as _}; -use alloy_json_rpc::RpcObject; -use alloy_primitives::{Address, TxHash, U256}; -use alloy_rpc_types_eth::{Header, TransactionInfo}; + use futures::StreamExt; use jsonrpsee::{ proc_macros::rpc, server::SubscriptionMessage, types::ErrorObject, PendingSubscriptionSink, @@ -13,7 +13,14 @@ use jsonrpsee::{ }; use moka::policy::EvictionPolicy; use moka::sync::Cache; -use reth_optimism_flashblocks::{PendingBlockRx, PendingFlashBlock}; +use std::{collections::HashSet, future::ready, sync::Arc}; +use tokio_stream::{wrappers::WatchStream, Stream}; + +use alloy_consensus::{transaction::TxHashRef, BlockHeader as _, Transaction as _, TxReceipt as _}; +use 
alloy_json_rpc::RpcObject; +use alloy_primitives::{Address, TxHash, U256}; +use alloy_rpc_types_eth::{Header, TransactionInfo}; + use reth_primitives_traits::{ NodePrimitives, Recovered, RecoveredBlock, SealedBlock, TransactionMeta, }; @@ -25,8 +32,6 @@ use reth_rpc_server_types::result::{internal_rpc_err, invalid_params_rpc_err}; use reth_storage_api::BlockNumReader; use reth_tasks::TaskSpawner; use reth_tracing::tracing::{trace, warn}; -use std::{collections::HashSet, future::ready, sync::Arc}; -use tokio_stream::{wrappers::WatchStream, Stream}; const MAX_TXHASH_CACHE_SIZE: u64 = 10_000; @@ -87,7 +92,7 @@ where /// Subscription tasks are spawned via [`tokio::task::spawn`] pub fn new( eth_pubsub: EthPubSub, - pending_block_rx: PendingBlockRx, + pending_block_rx: PendingSequenceRx, subscription_task_spawner: Box, tx_converter: Eth::RpcConvert, max_subscribed_addresses: usize, @@ -193,7 +198,7 @@ where #[derive(Clone)] pub struct FlashblocksPubSubInner { /// Pending block receiver from flashblocks, if available - pub(crate) pending_block_rx: PendingBlockRx, + pub(crate) pending_block_rx: PendingSequenceRx, /// The type that's used to spawn subscription tasks. pub(crate) subscription_task_spawner: Box, /// RPC transaction converter. 
@@ -233,7 +238,7 @@ where /// Convert a flashblock into a stream of events (header + transaction messages) fn flashblock_to_stream_events( - pending_block: &PendingFlashBlock, + pending_block: &PendingSequence, filter: &FlashblocksFilter, tx_converter: &Eth::RpcConvert, txhash_cache: &Cache, @@ -482,7 +487,7 @@ where /// Extract `Header` from `PendingFlashBlock` fn extract_header_from_pending_block( - pending_block: &PendingFlashBlock, + pending_block: &PendingSequence, ) -> Result, ErrorObject<'static>> { let block = pending_block.block(); Ok(Header::from_consensus( diff --git a/crates/flashblocks/src/test_utils.rs b/crates/flashblocks/src/test_utils.rs index deea2cf5..777fb6c5 100644 --- a/crates/flashblocks/src/test_utils.rs +++ b/crates/flashblocks/src/test_utils.rs @@ -1,73 +1,105 @@ -//! Test utilities for flashblocks. -//! -//! Provides a factory for creating test flashblocks with automatic timestamp management. -//! -//! # Examples -//! -//! ## Simple: Create a flashblock sequence for the same block -//! -//! ```ignore -//! let factory = TestFlashBlockFactory::new(); // Default 2 second block time -//! let fb0 = factory.flashblock_at(0).build(); -//! let fb1 = factory.flashblock_after(&fb0).build(); -//! let fb2 = factory.flashblock_after(&fb1).build(); -//! ``` -//! -//! ## Create flashblocks with transactions -//! -//! ```ignore -//! let factory = TestFlashBlockFactory::new(); -//! let fb0 = factory.flashblock_at(0).build(); -//! let txs = vec![Bytes::from_static(&[1, 2, 3])]; -//! let fb1 = factory.flashblock_after(&fb0).transactions(txs).build(); -//! ``` -//! -//! ## Test across multiple blocks (timestamps auto-increment) -//! -//! ```ignore -//! let factory = TestFlashBlockFactory::new(); // Default 2 second blocks -//! -//! // Block 100 at timestamp 1000000 -//! let fb0 = factory.flashblock_at(0).build(); -//! let fb1 = factory.flashblock_after(&fb0).build(); -//! -//! // Block 101 at timestamp 1000002 (auto-incremented by block_time) -//! 
let fb2 = factory.flashblock_for_next_block(&fb1).build(); -//! let fb3 = factory.flashblock_after(&fb2).build(); -//! ``` -//! -//! ## Full control with builder -//! -//! ```ignore -//! let factory = TestFlashBlockFactory::new(); -//! let fb = factory.builder() -//! .block_number(100) -//! .parent_hash(specific_hash) -//! .state_root(computed_root) -//! .transactions(txs) -//! .build(); -//! ``` +use std::sync::Arc; -use crate::FlashBlock; -use alloy_primitives::{Address, Bloom, Bytes, B256, U256}; +use alloy_consensus::{Header, Receipt, TxEip7702}; +use alloy_primitives::{Address, Bloom, Bytes, Signature, B256, U256}; use alloy_rpc_types_engine::PayloadId; +use op_alloy_consensus::OpTypedTransaction; use op_alloy_rpc_types_engine::{ - OpFlashblockPayloadBase, OpFlashblockPayloadDelta, OpFlashblockPayloadMetadata, + OpFlashblockPayload, OpFlashblockPayloadBase, OpFlashblockPayloadDelta, + OpFlashblockPayloadMetadata, }; -/// Factory for creating test flashblocks with automatic timestamp management. -/// -/// Tracks `block_time` to automatically increment timestamps when creating new blocks. -/// Returns builders that can be further customized before calling `build()`. 
-/// -/// # Examples -/// -/// ```ignore -/// let factory = TestFlashBlockFactory::new(); // Default 2 second block time -/// let fb0 = factory.flashblock_at(0).build(); -/// let fb1 = factory.flashblock_after(&fb0).build(); -/// let fb2 = factory.flashblock_for_next_block(&fb1).build(); // timestamp auto-increments -/// ``` +use reth_chain_state::{ComputedTrieData, ExecutedBlock}; +use reth_execution_types::{BlockExecutionOutput, BlockExecutionResult}; +use reth_optimism_primitives::{ + OpBlock, OpBlockBody, OpPrimitives, OpReceipt, OpTransactionSigned, +}; +use reth_primitives_traits::{RecoveredBlock, SealedBlock, SealedHeader}; + +pub(crate) fn mock_tx(nonce: u64) -> OpTransactionSigned { + let tx = TxEip7702 { + chain_id: 1u64, + nonce, + max_fee_per_gas: 0x28f000fff, + max_priority_fee_per_gas: 0x28f000fff, + gas_limit: 10, + to: Address::default(), + value: U256::from(3_u64), + input: Bytes::from(vec![1, 2]), + access_list: Default::default(), + authorization_list: Default::default(), + }; + let signature = Signature::new(U256::default(), U256::default(), true); + OpTransactionSigned::new_unhashed(OpTypedTransaction::Eip7702(tx), signature) +} + +pub(crate) fn make_executed_block( + block_number: u64, + parent_hash: B256, +) -> ExecutedBlock { + let header = Header { number: block_number, parent_hash, ..Default::default() }; + let sealed_header = SealedHeader::seal_slow(header); + let block = OpBlock::new(sealed_header.unseal(), Default::default()); + let sealed_block = SealedBlock::seal_slow(block); + let recovered_block = RecoveredBlock::new_sealed(sealed_block, vec![]); + let execution_output = Arc::new(BlockExecutionOutput { + result: BlockExecutionResult { + receipts: vec![], + requests: Default::default(), + gas_used: 0, + blob_gas_used: 0, + }, + state: Default::default(), + }); + ExecutedBlock::new(Arc::new(recovered_block), execution_output, ComputedTrieData::default()) +} + +pub(crate) fn empty_receipts() -> Arc> { + Arc::new(vec![]) +} + 
+pub(crate) fn make_executed_block_with_txs( + block_number: u64, + parent_hash: B256, + nonce_start: u64, + count: usize, +) -> (ExecutedBlock, Arc>) { + let txs: Vec = + (0..count).map(|i| mock_tx(nonce_start + i as u64)).collect(); + let senders: Vec
= (0..count).map(|_| Address::default()).collect(); + let receipts: Vec = (0..count) + .map(|i| { + OpReceipt::Eip7702(Receipt { + status: true.into(), + cumulative_gas_used: 21_000 * (i as u64 + 1), + logs: vec![], + }) + }) + .collect(); + + let header = Header { number: block_number, parent_hash, ..Default::default() }; + let sealed_header = SealedHeader::seal_slow(header); + let body = OpBlockBody { transactions: txs, ..Default::default() }; + let block = OpBlock::new(sealed_header.unseal(), body); + let sealed_block = SealedBlock::seal_slow(block); + let recovered_block = RecoveredBlock::new_sealed(sealed_block, senders); + let execution_output = Arc::new(BlockExecutionOutput { + result: BlockExecutionResult { + receipts: receipts.clone(), + requests: Default::default(), + gas_used: 21_000 * count as u64, + blob_gas_used: 0, + }, + state: Default::default(), + }); + let executed = ExecutedBlock::new( + Arc::new(recovered_block), + execution_output, + ComputedTrieData::default(), + ); + (executed, Arc::new(receipts)) +} + #[derive(Debug)] pub(crate) struct TestFlashBlockFactory { /// Block time in seconds (used to auto-increment timestamps) @@ -79,47 +111,22 @@ pub(crate) struct TestFlashBlockFactory { } impl TestFlashBlockFactory { - /// Creates a new factory with a default block time of 2 seconds. - /// /// Use [`with_block_time`](Self::with_block_time) to customize the block time. pub(crate) fn new() -> Self { Self { block_time: 2, base_timestamp: 1_000_000, current_block_number: 100 } } + #[allow(dead_code)] pub(crate) fn with_block_time(mut self, block_time: u64) -> Self { self.block_time = block_time; self } - /// Creates a builder for a flashblock at the specified index (within the current block). - /// - /// Returns a builder with index set, allowing further customization before building. 
- /// - /// # Examples - /// - /// ```ignore - /// let factory = TestFlashBlockFactory::new(); - /// let fb0 = factory.flashblock_at(0).build(); // Simple usage - /// let fb1 = factory.flashblock_at(1).state_root(specific_root).build(); // Customize - /// ``` pub(crate) fn flashblock_at(&self, index: u64) -> TestFlashBlockBuilder { self.builder().index(index).block_number(self.current_block_number) } - /// Creates a builder for a flashblock following the previous one in the same sequence. - /// - /// Automatically increments the index and maintains `block_number` and `payload_id`. - /// Returns a builder allowing further customization. - /// - /// # Examples - /// - /// ```ignore - /// let factory = TestFlashBlockFactory::new(); - /// let fb0 = factory.flashblock_at(0).build(); - /// let fb1 = factory.flashblock_after(&fb0).build(); // Simple - /// let fb2 = factory.flashblock_after(&fb1).transactions(txs).build(); // With txs - /// ``` - pub(crate) fn flashblock_after(&self, previous: &FlashBlock) -> TestFlashBlockBuilder { + pub(crate) fn flashblock_after(&self, previous: &OpFlashblockPayload) -> TestFlashBlockBuilder { let parent_hash = previous.base.as_ref().map(|b| b.parent_hash).unwrap_or(previous.diff.block_hash); @@ -131,21 +138,10 @@ impl TestFlashBlockFactory { .timestamp(previous.base.as_ref().map(|b| b.timestamp).unwrap_or(self.base_timestamp)) } - /// Creates a builder for a flashblock for the next block, starting a new sequence at index 0. - /// - /// Increments block number, uses previous `block_hash` as `parent_hash`, generates new - /// `payload_id`, and automatically increments the timestamp by `block_time`. - /// Returns a builder allowing further customization. 
- /// - /// # Examples - /// - /// ```ignore - /// let factory = TestFlashBlockFactory::new(); // 2 second blocks - /// let fb0 = factory.flashblock_at(0).build(); // Block 100, timestamp 1000000 - /// let fb1 = factory.flashblock_for_next_block(&fb0).build(); // Block 101, timestamp 1000002 - /// let fb2 = factory.flashblock_for_next_block(&fb1).transactions(txs).build(); // Customize - /// ``` - pub(crate) fn flashblock_for_next_block(&self, previous: &FlashBlock) -> TestFlashBlockBuilder { + pub(crate) fn flashblock_for_next_block( + &self, + previous: &OpFlashblockPayload, + ) -> TestFlashBlockBuilder { let prev_timestamp = previous.base.as_ref().map(|b| b.timestamp).unwrap_or(self.base_timestamp); @@ -157,21 +153,6 @@ impl TestFlashBlockFactory { .timestamp(prev_timestamp + self.block_time) } - /// Returns a custom builder for full control over flashblock creation. - /// - /// Use this when the convenience methods don't provide enough control. - /// - /// # Examples - /// - /// ```ignore - /// let factory = TestFlashBlockFactory::new(); - /// let fb = factory.builder() - /// .index(5) - /// .block_number(200) - /// .parent_hash(specific_hash) - /// .state_root(computed_root) - /// .build(); - /// ``` pub(crate) fn builder(&self) -> TestFlashBlockBuilder { TestFlashBlockBuilder { index: 0, @@ -193,9 +174,6 @@ impl TestFlashBlockFactory { } } -/// Custom builder for creating test flashblocks with full control. -/// -/// Created via [`TestFlashBlockFactory::builder()`]. #[derive(Debug)] pub(crate) struct TestFlashBlockBuilder { index: u64, @@ -216,81 +194,68 @@ pub(crate) struct TestFlashBlockBuilder { } impl TestFlashBlockBuilder { - /// Sets the flashblock index. pub(crate) fn index(mut self, index: u64) -> Self { self.index = index; self } - /// Sets the block number. pub(crate) fn block_number(mut self, block_number: u64) -> Self { self.block_number = block_number; self } - /// Sets the payload ID. 
pub(crate) fn payload_id(mut self, payload_id: PayloadId) -> Self { self.payload_id = payload_id; self } - /// Sets the parent hash. pub(crate) fn parent_hash(mut self, parent_hash: B256) -> Self { self.parent_hash = parent_hash; self } - /// Sets the timestamp. pub(crate) fn timestamp(mut self, timestamp: u64) -> Self { self.timestamp = timestamp; self } - /// Sets the base payload. Automatically created for index 0 if not set. #[allow(dead_code)] pub(crate) fn base(mut self, base: OpFlashblockPayloadBase) -> Self { self.base = Some(base); self } - /// Sets the block hash in the diff. #[allow(dead_code)] pub(crate) fn block_hash(mut self, block_hash: B256) -> Self { self.block_hash = block_hash; self } - /// Sets the state root in the diff. #[allow(dead_code)] pub(crate) fn state_root(mut self, state_root: B256) -> Self { self.state_root = state_root; self } - /// Sets the receipts root in the diff. #[allow(dead_code)] pub(crate) fn receipts_root(mut self, receipts_root: B256) -> Self { self.receipts_root = receipts_root; self } - /// Sets the transactions in the diff. + #[allow(dead_code)] pub(crate) fn transactions(mut self, transactions: Vec) -> Self { self.transactions = transactions; self } - /// Sets the gas used in the diff. #[allow(dead_code)] pub(crate) fn gas_used(mut self, gas_used: u64) -> Self { self.gas_used = gas_used; self } - /// Builds the flashblock. - /// - /// If index is 0 and no base was explicitly set, creates a default base. 
- pub(crate) fn build(mut self) -> FlashBlock { + pub(crate) fn build(mut self) -> OpFlashblockPayload { // Auto-create base for index 0 if not set if self.index == 0 && self.base.is_none() { self.base = Some(OpFlashblockPayloadBase { @@ -306,7 +271,7 @@ impl TestFlashBlockBuilder { }); } - FlashBlock { + OpFlashblockPayload { index: self.index, payload_id: self.payload_id, base: self.base, diff --git a/crates/flashblocks/src/tx_cache.rs b/crates/flashblocks/src/tx_cache.rs deleted file mode 100644 index f03d5e0c..00000000 --- a/crates/flashblocks/src/tx_cache.rs +++ /dev/null @@ -1,702 +0,0 @@ -//! Transaction execution caching for flashblock building. -//! -//! When flashblocks arrive incrementally, each new flashblock triggers a rebuild of pending -//! state from all transactions in the sequence. Without caching, this means re-reading -//! state from disk for accounts/storage that were already loaded in previous builds. -//! -//! # Approach -//! -//! This module caches the cumulative bundle state from previous executions. When the next -//! flashblock arrives, if its transaction list is a continuation of the cached list, the -//! cached bundle can be used as a **prestate** for the State builder. This avoids redundant -//! disk reads for accounts/storage that were already modified. -//! -//! **Important**: Prefix transaction skipping is only safe when the incoming transaction list -//! fully extends the cached list. In that case, callers can execute only the uncached suffix -//! and stitch in the cached prefix receipts/metadata. -//! -//! The cache stores: -//! - Ordered list of executed transaction hashes (for prefix matching) -//! - Cumulative bundle state after all cached transactions (used as prestate) -//! - Cumulative receipts for all cached transactions (for future optimization) -//! - Block-level execution metadata for cached transactions (gas/requests) -//! -//! # Example -//! -//! ```text -//! Flashblock 0: txs [A, B] -//! 
-> Execute A, B from scratch (cold state reads) -//! -> Cache: txs=[A,B], bundle=state_after_AB -//! -//! Flashblock 1: txs [A, B, C] -//! -> Prefix [A, B] matches cache -//! -> Use cached bundle as prestate (warm state) -//! -> Execute A, B, C (A, B hit prestate cache, faster) -//! -> Cache: txs=[A,B,C], bundle=state_after_ABC -//! -//! Flashblock 2 (reorg): txs [A, D, E] -//! -> Prefix [A] matches, but tx[1]=D != B -//! -> Cached prestate may be partially useful, but diverges -//! -> Execute A, D, E -//! ``` - -use alloy_eips::eip7685::Requests; -use alloy_primitives::B256; -use reth_primitives_traits::NodePrimitives; -use reth_revm::db::BundleState; - -/// Cached block-level execution metadata for the stored transaction prefix. -#[derive(Debug, Clone, Default, PartialEq, Eq)] -pub(crate) struct CachedExecutionMeta { - /// EIP-7685 requests emitted while executing the cached prefix. - pub requests: Requests, - /// Total gas used by the cached prefix. - pub gas_used: u64, - /// Total blob/DA gas used by the cached prefix. - pub blob_gas_used: u64, -} - -/// Resumable cached state: bundle + receipts + cached prefix length. -pub(crate) type ResumableState<'a, N> = - (&'a BundleState, &'a [::Receipt], usize); - -/// Resumable cached state plus execution metadata for the cached prefix. -pub(crate) type ResumableStateWithExecutionMeta<'a, N> = - (&'a BundleState, &'a [::Receipt], &'a Requests, u64, u64, usize); - -/// Cache of transaction execution results for a single block. -/// -/// Stores cumulative execution state that can be used as a prestate to avoid -/// redundant disk reads when re-executing transactions. The cached bundle provides -/// warm state for accounts/storage already loaded, improving execution performance. -/// -/// **Note**: This cache does NOT skip transaction execution - all transactions must -/// still be executed to populate the block body. The cache only optimizes state reads. 
-/// -/// The cache is invalidated when: -/// - A new block starts (different block number) -/// - Parent hash changes for parent-scoped lookups -/// - A reorg is detected (transaction list diverges from cached prefix) -/// - Explicitly cleared -#[derive(Debug)] -pub struct TransactionCache { - /// Block number this cache is valid for. - block_number: u64, - /// Parent hash this cache is valid for. - cached_parent_hash: Option, - /// Ordered list of transaction hashes that have been executed. - executed_tx_hashes: Vec, - /// Cumulative bundle state after executing all cached transactions. - cumulative_bundle: BundleState, - /// Receipts for all cached transactions, in execution order. - receipts: Vec, - /// Cached block-level execution metadata. - execution_meta: CachedExecutionMeta, -} - -impl Default for TransactionCache { - fn default() -> Self { - Self::new() - } -} - -impl TransactionCache { - /// Creates a new empty transaction cache. - pub fn new() -> Self { - Self { - block_number: 0, - cached_parent_hash: None, - executed_tx_hashes: Vec::new(), - cumulative_bundle: BundleState::default(), - receipts: Vec::new(), - execution_meta: CachedExecutionMeta::default(), - } - } - - /// Creates a new cache for a specific block number. - pub fn for_block(block_number: u64) -> Self { - Self { block_number, ..Self::new() } - } - - /// Returns the block number this cache is valid for. - pub const fn block_number(&self) -> u64 { - self.block_number - } - - /// Returns the parent hash this cache is valid for, if tracked. - pub const fn parent_hash(&self) -> Option { - self.cached_parent_hash - } - - /// Checks if this cache is valid for the given block number. - pub const fn is_valid_for_block(&self, block_number: u64) -> bool { - self.block_number == block_number - } - - /// Checks if this cache is valid for the given block number and parent hash. 
- pub fn is_valid_for_block_parent(&self, block_number: u64, parent_hash: B256) -> bool { - self.block_number == block_number && self.cached_parent_hash == Some(parent_hash) - } - - /// Returns the number of cached transactions. - pub const fn len(&self) -> usize { - self.executed_tx_hashes.len() - } - - /// Returns true if the cache is empty. - pub const fn is_empty(&self) -> bool { - self.executed_tx_hashes.is_empty() - } - - /// Returns the cached transaction hashes. - pub fn executed_tx_hashes(&self) -> &[B256] { - &self.executed_tx_hashes - } - - /// Returns the cached receipts. - pub fn receipts(&self) -> &[N::Receipt] { - &self.receipts - } - - /// Returns the cumulative bundle state. - pub const fn bundle(&self) -> &BundleState { - &self.cumulative_bundle - } - - /// Clears the cache. - pub fn clear(&mut self) { - self.executed_tx_hashes.clear(); - self.cumulative_bundle = BundleState::default(); - self.receipts.clear(); - self.execution_meta = CachedExecutionMeta::default(); - self.block_number = 0; - self.cached_parent_hash = None; - } - - /// Updates the cache for a new block, clearing if the block number changed. - /// - /// Returns true if the cache was cleared. - pub fn update_for_block(&mut self, block_number: u64) -> bool { - if self.block_number == block_number { - false - } else { - self.clear(); - self.block_number = block_number; - true - } - } - - /// Computes the length of the matching prefix between cached transactions - /// and the provided transaction hashes. - /// - /// Returns the number of transactions that can be skipped because they - /// match the cached execution results. - pub fn matching_prefix_len(&self, tx_hashes: &[B256]) -> usize { - self.executed_tx_hashes - .iter() - .zip(tx_hashes.iter()) - .take_while(|(cached, incoming)| cached == incoming) - .count() - } - - /// Returns cached state for resuming execution if the incoming transactions - /// have a matching prefix with the cache. 
- /// - /// Returns `Some((bundle, receipts, skip_count))` if there's a non-empty matching - /// prefix, where: - /// - `bundle` is the cumulative state after the matching prefix - /// - `receipts` is the receipts for the matching prefix - /// - `skip_count` is the number of transactions to skip - /// - /// Returns `None` if: - /// - The cache is empty - /// - No prefix matches (first transaction differs) - /// - Block number doesn't match - pub fn get_resumable_state( - &self, - block_number: u64, - tx_hashes: &[B256], - ) -> Option> { - self.get_resumable_state_with_execution_meta(block_number, tx_hashes) - .map(|(bundle, receipts, .., skip_count)| (bundle, receipts, skip_count)) - } - - /// Returns cached state and execution metadata for resuming execution if the incoming - /// transactions have a matching prefix with the cache. - /// - /// Returns `Some((bundle, receipts, requests, gas_used, blob_gas_used, skip_count))` if - /// there's a non-empty matching prefix and the entire cache matches the incoming prefix. - pub(crate) fn get_resumable_state_with_execution_meta( - &self, - block_number: u64, - tx_hashes: &[B256], - ) -> Option> { - if !self.is_valid_for_block(block_number) || self.is_empty() { - return None; - } - - let prefix_len = self.matching_prefix_len(tx_hashes); - if prefix_len == 0 { - return None; - } - - // Only return state if the full cache matches (partial prefix would need - // intermediate state snapshots, which we don't currently store). - // Partial match means incoming txs diverge from cache, need to re-execute. - (prefix_len == self.executed_tx_hashes.len()).then_some(( - &self.cumulative_bundle, - self.receipts.as_slice(), - &self.execution_meta.requests, - self.execution_meta.gas_used, - self.execution_meta.blob_gas_used, - prefix_len, - )) - } - - /// Returns cached state and execution metadata for resuming execution if the incoming - /// transactions have a matching prefix with the cache and the parent hash matches. 
- /// - /// Returns `Some((bundle, receipts, requests, gas_used, blob_gas_used, skip_count))` if - /// there's a non-empty matching prefix, the full cache matches the incoming prefix, and the - /// `(block_number, parent_hash)` tuple matches the cached scope. - pub(crate) fn get_resumable_state_with_execution_meta_for_parent( - &self, - block_number: u64, - parent_hash: B256, - tx_hashes: &[B256], - ) -> Option> { - if !self.is_valid_for_block_parent(block_number, parent_hash) || self.is_empty() { - return None; - } - - let prefix_len = self.matching_prefix_len(tx_hashes); - if prefix_len == 0 { - return None; - } - - (prefix_len == self.executed_tx_hashes.len()).then_some(( - &self.cumulative_bundle, - self.receipts.as_slice(), - &self.execution_meta.requests, - self.execution_meta.gas_used, - self.execution_meta.blob_gas_used, - prefix_len, - )) - } - - /// Updates the cache with new execution results. - /// - /// This should be called after executing a flashblock. The provided bundle - /// and receipts should represent the cumulative state after all transactions. - pub fn update( - &mut self, - block_number: u64, - tx_hashes: Vec, - bundle: BundleState, - receipts: Vec, - ) { - self.update_with_execution_meta( - block_number, - tx_hashes, - bundle, - receipts, - CachedExecutionMeta::default(), - ); - } - - /// Updates the cache with new execution results and block-level metadata. - pub(crate) fn update_with_execution_meta( - &mut self, - block_number: u64, - tx_hashes: Vec, - bundle: BundleState, - receipts: Vec, - execution_meta: CachedExecutionMeta, - ) { - self.block_number = block_number; - self.cached_parent_hash = None; - self.executed_tx_hashes = tx_hashes; - self.cumulative_bundle = bundle; - self.receipts = receipts; - self.execution_meta = execution_meta; - } - - /// Updates the cache with new execution results and block-level metadata, scoped to the - /// provided parent hash. 
- pub(crate) fn update_with_execution_meta_for_parent( - &mut self, - block_number: u64, - parent_hash: B256, - tx_hashes: Vec, - bundle: BundleState, - receipts: Vec, - execution_meta: CachedExecutionMeta, - ) { - self.block_number = block_number; - self.cached_parent_hash = Some(parent_hash); - self.executed_tx_hashes = tx_hashes; - self.cumulative_bundle = bundle; - self.receipts = receipts; - self.execution_meta = execution_meta; - } -} - -#[cfg(test)] -mod tests { - use super::*; - use reth_optimism_primitives::OpPrimitives; - - type TestCache = TransactionCache; - - #[test] - fn test_cache_block_validation() { - let mut cache = TestCache::for_block(100); - assert!(cache.is_valid_for_block(100)); - assert!(!cache.is_valid_for_block(101)); - assert!(!cache.is_valid_for_block_parent(100, B256::repeat_byte(0x11))); - - // Update for same block doesn't clear - assert!(!cache.update_for_block(100)); - - // Update for different block clears - assert!(cache.update_for_block(101)); - assert!(cache.is_valid_for_block(101)); - assert!(cache.parent_hash().is_none()); - } - - #[test] - fn test_cache_clear() { - let mut cache = TestCache::for_block(100); - assert_eq!(cache.block_number(), 100); - - cache.clear(); - assert_eq!(cache.block_number(), 0); - assert!(cache.is_empty()); - } - - #[test] - fn test_matching_prefix_len() { - let mut cache = TestCache::for_block(100); - - let tx_a = B256::repeat_byte(0xAA); - let tx_b = B256::repeat_byte(0xBB); - let tx_c = B256::repeat_byte(0xCC); - let tx_d = B256::repeat_byte(0xDD); - - // Update cache with [A, B] - cache.update(100, vec![tx_a, tx_b], BundleState::default(), vec![]); - - // Full match - assert_eq!(cache.matching_prefix_len(&[tx_a, tx_b]), 2); - - // Continuation - assert_eq!(cache.matching_prefix_len(&[tx_a, tx_b, tx_c]), 2); - - // Partial match (reorg at position 1) - assert_eq!(cache.matching_prefix_len(&[tx_a, tx_d, tx_c]), 1); - - // No match (reorg at position 0) - assert_eq!(cache.matching_prefix_len(&[tx_d, 
tx_b, tx_c]), 0); - - // Empty incoming - assert_eq!(cache.matching_prefix_len(&[]), 0); - } - - #[test] - fn test_get_resumable_state() { - let mut cache = TestCache::for_block(100); - - let tx_a = B256::repeat_byte(0xAA); - let tx_b = B256::repeat_byte(0xBB); - let tx_c = B256::repeat_byte(0xCC); - - // Empty cache returns None - assert!(cache.get_resumable_state(100, &[tx_a, tx_b]).is_none()); - - // Update cache with [A, B] - cache.update(100, vec![tx_a, tx_b], BundleState::default(), vec![]); - - // Wrong block number returns None - assert!(cache.get_resumable_state(101, &[tx_a, tx_b]).is_none()); - - // Exact match returns state - let result = cache.get_resumable_state(100, &[tx_a, tx_b]); - assert!(result.is_some()); - let (_, _, skip) = result.unwrap(); - assert_eq!(skip, 2); - - // Continuation returns state (can skip cached txs) - let result = cache.get_resumable_state(100, &[tx_a, tx_b, tx_c]); - assert!(result.is_some()); - let (_, _, skip) = result.unwrap(); - assert_eq!(skip, 2); - - // Partial match (reorg) returns None - can't use partial cache - assert!(cache.get_resumable_state(100, &[tx_a, tx_c]).is_none()); - } - - // ==================== E2E Cache Reuse Scenario Tests ==================== - - /// Tests the complete E2E cache scenario: fb0 [A,B] → fb1 [A,B,C] - /// Verifies that cached bundle can be used as prestate for the continuation. 
- #[test] - fn test_e2e_cache_reuse_continuation_scenario() { - let mut cache = TestCache::new(); - - let tx_a = B256::repeat_byte(0xAA); - let tx_b = B256::repeat_byte(0xBB); - let tx_c = B256::repeat_byte(0xCC); - - // Simulate fb0: execute [A, B] from scratch - let fb0_txs = vec![tx_a, tx_b]; - assert!(cache.get_resumable_state(100, &fb0_txs).is_none()); - - // After fb0 execution, update cache - cache.update(100, fb0_txs, BundleState::default(), vec![]); - assert_eq!(cache.len(), 2); - - // Simulate fb1: [A, B, C] - should resume from cached state - let fb1_txs = vec![tx_a, tx_b, tx_c]; - let result = cache.get_resumable_state(100, &fb1_txs); - assert!(result.is_some()); - let (bundle, receipts, skip) = result.unwrap(); - - // skip=2 indicates 2 txs are covered by cached state (for logging) - // Note: All transactions are still executed, skip is informational only - assert_eq!(skip, 2); - // Bundle is used as prestate to warm the State builder - assert!(bundle.state.is_empty()); // Default bundle is empty in test - assert!(receipts.is_empty()); // No receipts in this test - - // After fb1 execution, update cache with full list - cache.update(100, fb1_txs, BundleState::default(), vec![]); - assert_eq!(cache.len(), 3); - } - - /// Tests reorg scenario: fb0 [A, B] → fb1 [A, D, E] - /// Verifies that divergent tx list invalidates cache. 
- #[test] - fn test_e2e_cache_reorg_scenario() { - let mut cache = TestCache::new(); - - let tx_a = B256::repeat_byte(0xAA); - let tx_b = B256::repeat_byte(0xBB); - let tx_d = B256::repeat_byte(0xDD); - let tx_e = B256::repeat_byte(0xEE); - - // fb0: execute [A, B] - cache.update(100, vec![tx_a, tx_b], BundleState::default(), vec![]); - - // fb1 (reorg): [A, D, E] - tx[1] diverges, cannot resume - let fb1_txs = vec![tx_a, tx_d, tx_e]; - let result = cache.get_resumable_state(100, &fb1_txs); - assert!(result.is_none()); // Partial match means we can't use cache - } - - /// Tests multi-flashblock progression within same block: - /// fb0 [A] → fb1 [A,B] → fb2 [A,B,C] - /// - /// Each flashblock can use the previous bundle as prestate for warm state reads. - /// Note: All transactions are still executed; skip count is for logging only. - #[test] - fn test_e2e_multi_flashblock_progression() { - let mut cache = TestCache::new(); - - let tx_a = B256::repeat_byte(0xAA); - let tx_b = B256::repeat_byte(0xBB); - let tx_c = B256::repeat_byte(0xCC); - - // fb0: [A] - cache.update(100, vec![tx_a], BundleState::default(), vec![]); - assert_eq!(cache.len(), 1); - - // fb1: [A, B] - cached state covers [A] (skip=1 for logging) - let fb1_txs = vec![tx_a, tx_b]; - let result = cache.get_resumable_state(100, &fb1_txs); - assert!(result.is_some()); - assert_eq!(result.unwrap().2, 1); // 1 tx covered by cache - - cache.update(100, fb1_txs, BundleState::default(), vec![]); - assert_eq!(cache.len(), 2); - - // fb2: [A, B, C] - cached state covers [A, B] (skip=2 for logging) - let fb2_txs = vec![tx_a, tx_b, tx_c]; - let result = cache.get_resumable_state(100, &fb2_txs); - assert!(result.is_some()); - assert_eq!(result.unwrap().2, 2); // 2 txs covered by cache - - cache.update(100, fb2_txs, BundleState::default(), vec![]); - assert_eq!(cache.len(), 3); - } - - /// Tests that cache is invalidated on block number change. 
- #[test] - fn test_e2e_block_transition_clears_cache() { - let mut cache = TestCache::new(); - - let tx_a = B256::repeat_byte(0xAA); - let tx_b = B256::repeat_byte(0xBB); - - // Block 100: cache [A, B] - cache.update(100, vec![tx_a, tx_b], BundleState::default(), vec![]); - assert_eq!(cache.len(), 2); - - // Block 101: same txs shouldn't resume (different block) - let result = cache.get_resumable_state(101, &[tx_a, tx_b]); - assert!(result.is_none()); - - // Explicit block update clears cache - cache.update_for_block(101); - assert!(cache.is_empty()); - } - - /// Tests cache behavior with empty transaction list. - #[test] - fn test_cache_empty_transactions() { - let mut cache = TestCache::new(); - - // Empty flashblock (only system tx, no user txs) - cache.update(100, vec![], BundleState::default(), vec![]); - assert!(cache.is_empty()); - - // Can't resume from empty cache - let tx_a = B256::repeat_byte(0xAA); - assert!(cache.get_resumable_state(100, &[tx_a]).is_none()); - } - - /// Documents the semantics of `skip_count`. - /// - /// A resumable state is only returned when the incoming transaction list fully extends the - /// cached list. In that case, `skip_count` is the number of prefix transactions covered by - /// cached execution output. - #[test] - fn test_skip_count_matches_cached_prefix_len() { - let mut cache = TestCache::new(); - - let tx_a = B256::repeat_byte(0xAA); - let tx_b = B256::repeat_byte(0xBB); - let tx_c = B256::repeat_byte(0xCC); - - // Cache state after executing [A, B] - cache.update(100, vec![tx_a, tx_b], BundleState::default(), vec![]); - - // get_resumable_state returns skip=2 for prefix [A, B] - let result = cache.get_resumable_state(100, &[tx_a, tx_b, tx_c]); - assert!(result.is_some()); - let (bundle, _receipts, skip_count) = result.unwrap(); - - // skip_count indicates cached prefix length - assert_eq!(skip_count, 2); - - // The bundle is the important part - used as resumable prestate. 
- assert!(bundle.state.is_empty()); // Default in test, real one has state - } - - /// Tests that receipts are properly cached and returned. - #[test] - fn test_cache_preserves_receipts() { - use op_alloy_consensus::OpReceipt; - use reth_optimism_primitives::OpPrimitives; - - let mut cache: TransactionCache = TransactionCache::new(); - - let tx_a = B256::repeat_byte(0xAA); - let tx_b = B256::repeat_byte(0xBB); - - // Create mock receipts - let receipt_a = OpReceipt::Legacy(alloy_consensus::Receipt { - status: alloy_consensus::Eip658Value::Eip658(true), - cumulative_gas_used: 21000, - logs: vec![], - }); - let receipt_b = OpReceipt::Legacy(alloy_consensus::Receipt { - status: alloy_consensus::Eip658Value::Eip658(true), - cumulative_gas_used: 42000, - logs: vec![], - }); - - cache.update(100, vec![tx_a, tx_b], BundleState::default(), vec![receipt_a, receipt_b]); - - // Verify receipts are preserved - assert_eq!(cache.receipts().len(), 2); - - // On resumable state, receipts are returned - let tx_c = B256::repeat_byte(0xCC); - let result = cache.get_resumable_state(100, &[tx_a, tx_b, tx_c]); - assert!(result.is_some()); - let (_, receipts, _) = result.unwrap(); - assert_eq!(receipts.len(), 2); - } - - #[test] - fn test_cache_preserves_execution_meta() { - let mut cache = TestCache::new(); - - let tx_a = B256::repeat_byte(0xAA); - let tx_b = B256::repeat_byte(0xBB); - let tx_c = B256::repeat_byte(0xCC); - - let mut requests = Requests::default(); - requests.push_request_with_type(0x01, [0xAA, 0xBB]); - - cache.update_with_execution_meta( - 100, - vec![tx_a, tx_b], - BundleState::default(), - vec![], - CachedExecutionMeta { - requests: requests.clone(), - gas_used: 42_000, - blob_gas_used: 123, - }, - ); - - let resumable = cache.get_resumable_state_with_execution_meta(100, &[tx_a, tx_b, tx_c]); - assert!(resumable.is_some()); - let (_, _, cached_requests, gas_used, blob_gas_used, skip_count) = resumable.unwrap(); - assert_eq!(skip_count, 2); - assert_eq!(gas_used, 
42_000); - assert_eq!(blob_gas_used, 123); - assert_eq!(cached_requests, &requests); - } - - #[test] - fn test_cache_parent_scoping() { - let mut cache = TestCache::new(); - - let tx_a = B256::repeat_byte(0xAA); - let tx_b = B256::repeat_byte(0xBB); - let tx_c = B256::repeat_byte(0xCC); - let parent_a = B256::repeat_byte(0x11); - let parent_b = B256::repeat_byte(0x22); - - cache.update_with_execution_meta_for_parent( - 100, - parent_a, - vec![tx_a, tx_b], - BundleState::default(), - vec![], - CachedExecutionMeta { - requests: Requests::default(), - gas_used: 42_000, - blob_gas_used: 0, - }, - ); - - // Matching block + parent should hit. - let hit = cache.get_resumable_state_with_execution_meta_for_parent( - 100, - parent_a, - &[tx_a, tx_b, tx_c], - ); - assert!(hit.is_some()); - - // Same block but different parent should miss. - let miss = cache.get_resumable_state_with_execution_meta_for_parent( - 100, - parent_b, - &[tx_a, tx_b, tx_c], - ); - assert!(miss.is_none()); - } -} diff --git a/crates/flashblocks/src/validation.rs b/crates/flashblocks/src/validation.rs deleted file mode 100644 index d5012eba..00000000 --- a/crates/flashblocks/src/validation.rs +++ /dev/null @@ -1,599 +0,0 @@ -//! Flashblock sequence validation and reorganization detection. -//! -//! Provides stateless validation logic for flashblock sequencing and chain reorg detection. -//! -//! This module contains three main components: -//! -//! 1. [`FlashblockSequenceValidator`] - Validates that incoming flashblocks follow the expected -//! sequence ordering (consecutive indices within a block, proper block transitions). -//! -//! 2. [`ReorgDetector`] - Detects chain reorganizations by comparing full block fingerprints (block -//! hash, parent hash, and transaction hashes) between tracked (pending) state and canonical -//! chain state. -//! -//! 3. [`CanonicalBlockReconciler`] - Determines the appropriate strategy for reconciling pending -//! flashblock state when new canonical blocks arrive. 
- -use alloy_primitives::B256; - -/// Result of validating a flashblock's position in the sequence. -#[derive(Debug, Clone, PartialEq, Eq)] -pub enum SequenceValidationResult { - /// Next consecutive flashblock within the current block (same block, index + 1). - NextInSequence, - /// First flashblock (index 0) of the next block (block + 1). - FirstOfNextBlock, - /// Duplicate flashblock (same block and index) - should be ignored. - Duplicate, - /// Non-sequential index within the same block - indicates missed flashblocks. - NonSequentialGap { - /// Expected flashblock index. - expected: u64, - /// Actual incoming flashblock index. - actual: u64, - }, - /// New block received with non-zero index - missed the base flashblock. - InvalidNewBlockIndex { - /// Block number of the incoming flashblock. - block_number: u64, - /// The invalid (non-zero) index received. - index: u64, - }, -} - -/// Stateless validator for flashblock sequence ordering. -/// -/// Flashblocks must arrive in strict sequential order: -/// - Within a block: indices must be consecutive (0, 1, 2, ...) -/// - Across blocks: new block must start with index 0 and be exactly `block_number + 1` -/// -/// # Example -/// -/// ``` -/// use xlayer_flashblocks::validation::{ -/// FlashblockSequenceValidator, SequenceValidationResult, -/// }; -/// -/// // Valid: next flashblock in sequence -/// let result = FlashblockSequenceValidator::validate(100, 2, 100, 3); -/// assert_eq!(result, SequenceValidationResult::NextInSequence); -/// -/// // Valid: first flashblock of next block -/// let result = FlashblockSequenceValidator::validate(100, 5, 101, 0); -/// assert_eq!(result, SequenceValidationResult::FirstOfNextBlock); -/// -/// // Invalid: gap in sequence -/// let result = FlashblockSequenceValidator::validate(100, 2, 100, 5); -/// assert!(matches!(result, SequenceValidationResult::NonSequentialGap { .. 
})); -/// ``` -#[derive(Debug, Clone, Copy, Default)] -pub struct FlashblockSequenceValidator; - -impl FlashblockSequenceValidator { - /// Validates whether an incoming flashblock follows the expected sequence. - /// - /// Returns the appropriate [`SequenceValidationResult`] based on: - /// - Same block, index + 1 → `NextInSequence` - /// - Next block, index 0 → `FirstOfNextBlock` - /// - Same block and index → `Duplicate` - /// - Same block, wrong index → `NonSequentialGap` - /// - Different block, non-zero index or block gap → `InvalidNewBlockIndex` - pub const fn validate( - latest_block_number: u64, - latest_flashblock_index: u64, - incoming_block_number: u64, - incoming_index: u64, - ) -> SequenceValidationResult { - // Next flashblock within the current block - if incoming_block_number == latest_block_number - && incoming_index == latest_flashblock_index + 1 - { - SequenceValidationResult::NextInSequence - // First flashblock of the next block - } else if incoming_block_number == latest_block_number + 1 && incoming_index == 0 { - SequenceValidationResult::FirstOfNextBlock - // New block with non-zero index or block gap - } else if incoming_block_number != latest_block_number { - SequenceValidationResult::InvalidNewBlockIndex { - block_number: incoming_block_number, - index: incoming_index, - } - } else if incoming_index == latest_flashblock_index { - // Duplicate flashblock - SequenceValidationResult::Duplicate - } else { - // Non-sequential index within the same block - SequenceValidationResult::NonSequentialGap { - expected: latest_flashblock_index + 1, - actual: incoming_index, - } - } - } -} - -/// Fingerprint for a tracked block (pending/cached sequence). -#[derive(Debug, Clone, PartialEq, Eq)] -pub struct TrackedBlockFingerprint { - /// Block number. - pub block_number: u64, - /// Block hash. - pub block_hash: B256, - /// Parent hash. - pub parent_hash: B256, - /// Ordered transaction hashes in the block. 
- pub tx_hashes: Vec, -} - -/// Fingerprint for a canonical block notification. -#[derive(Debug, Clone, PartialEq, Eq)] -pub struct CanonicalBlockFingerprint { - /// Block number. - pub block_number: u64, - /// Block hash. - pub block_hash: B256, - /// Parent hash. - pub parent_hash: B256, - /// Ordered transaction hashes in the block. - pub tx_hashes: Vec, -} - -/// Result of a reorganization detection check. -#[derive(Debug, Clone, PartialEq, Eq)] -pub enum ReorgDetectionResult { - /// Tracked and canonical fingerprints match exactly. - NoReorg, - /// Tracked and canonical fingerprints differ. - ReorgDetected, -} - -impl ReorgDetectionResult { - /// Returns `true` if a reorganization was detected. - #[inline] - pub const fn is_reorg(&self) -> bool { - matches!(self, Self::ReorgDetected) - } - - /// Returns `true` if no reorganization was detected. - #[inline] - pub const fn is_no_reorg(&self) -> bool { - matches!(self, Self::NoReorg) - } -} - -/// Detects chain reorganizations by comparing full block fingerprints. 
-/// -/// A reorg is detected when any fingerprint component differs: -/// - Block hash -/// - Parent hash -/// - Transaction hash list (including ordering) -/// -/// # Example -/// -/// ``` -/// use alloy_primitives::B256; -/// use xlayer_flashblocks::validation::{ -/// CanonicalBlockFingerprint, ReorgDetectionResult, ReorgDetector, TrackedBlockFingerprint, -/// }; -/// -/// let tracked = TrackedBlockFingerprint { -/// block_number: 100, -/// block_hash: B256::repeat_byte(0xAA), -/// parent_hash: B256::repeat_byte(0x11), -/// tx_hashes: vec![B256::repeat_byte(1), B256::repeat_byte(2)], -/// }; -/// let canonical = CanonicalBlockFingerprint { -/// block_number: 100, -/// block_hash: B256::repeat_byte(0xAA), -/// parent_hash: B256::repeat_byte(0x11), -/// tx_hashes: vec![B256::repeat_byte(1), B256::repeat_byte(2)], -/// }; -/// -/// let result = ReorgDetector::detect(&tracked, &canonical); -/// assert_eq!(result, ReorgDetectionResult::NoReorg); -/// ``` -#[derive(Debug, Clone, Copy, Default)] -pub struct ReorgDetector; - -impl ReorgDetector { - /// Compares tracked vs canonical block fingerprints to detect reorgs. - pub fn detect( - tracked: &TrackedBlockFingerprint, - canonical: &CanonicalBlockFingerprint, - ) -> ReorgDetectionResult { - if tracked.block_hash == canonical.block_hash - && tracked.parent_hash == canonical.parent_hash - && tracked.tx_hashes == canonical.tx_hashes - { - ReorgDetectionResult::NoReorg - } else { - ReorgDetectionResult::ReorgDetected - } - } -} - -/// Strategy for reconciling pending state with canonical state on new canonical blocks. -/// -/// When a new canonical block arrives, the system must decide how to update -/// the pending flashblock state. This enum represents the possible strategies. -#[derive(Debug, Clone, PartialEq, Eq)] -pub enum ReconciliationStrategy { - /// Canonical caught up or passed pending (canonical >= latest pending). Clear pending state. - CatchUp, - /// Reorg detected (tx mismatch). 
Rebuild pending from canonical. - HandleReorg, - /// Pending too far ahead of canonical. - DepthLimitExceeded { - /// Current depth of pending blocks. - depth: u64, - /// Configured maximum depth. - max_depth: u64, - }, - /// No issues - continue building on pending state. - Continue, - /// No pending state exists (startup or after clear). - NoPendingState, -} - -/// Determines reconciliation strategy for canonical block updates. -/// -/// This reconciler helps maintain consistency between pending flashblock state -/// and the canonical chain. It's used when new canonical blocks arrive to -/// determine whether to: -/// - Clear pending state (canonical caught up) -/// - Rebuild pending state (reorg detected) -/// - Continue as-is (pending still ahead and valid) -/// -/// # Priority Order -/// -/// The reconciler checks conditions in this order: -/// 1. `NoPendingState` - No pending state to reconcile -/// 2. `CatchUp` - Canonical has caught up to or passed pending -/// 3. `HandleReorg` - Reorg detected (takes precedence over depth limit) -/// 4. `DepthLimitExceeded` - Pending is too far ahead -/// 5. `Continue` - Everything is fine, keep building -/// -/// # Example -/// -/// ``` -/// use xlayer_flashblocks::validation::{CanonicalBlockReconciler, ReconciliationStrategy}; -/// -/// // Canonical caught up to pending -/// let strategy = CanonicalBlockReconciler::reconcile( -/// Some(100), // earliest pending -/// Some(105), // latest pending -/// 105, // canonical block number -/// 10, // max depth -/// false, // no reorg detected -/// ); -/// assert_eq!(strategy, ReconciliationStrategy::CatchUp); -/// ``` -#[derive(Debug, Clone, Copy, Default)] -pub struct CanonicalBlockReconciler; - -impl CanonicalBlockReconciler { - /// Returns the appropriate [`ReconciliationStrategy`] based on pending vs canonical state. 
- /// - /// Priority: `NoPendingState` → `CatchUp` → `HandleReorg` → `DepthLimitExceeded` → `Continue` - pub const fn reconcile( - pending_earliest_block: Option, - pending_latest_block: Option, - canonical_block_number: u64, - max_depth: u64, - reorg_detected: bool, - ) -> ReconciliationStrategy { - // Check if pending state exists - let latest = match (pending_earliest_block, pending_latest_block) { - (Some(_e), Some(l)) => l, - _ => return ReconciliationStrategy::NoPendingState, - }; - - // Check if canonical has caught up or passed pending - if latest <= canonical_block_number { - return ReconciliationStrategy::CatchUp; - } - - // Check for reorg - if reorg_detected { - return ReconciliationStrategy::HandleReorg; - } - - // Check depth limit: how many pending blocks are ahead of canonical tip. - let depth = latest.saturating_sub(canonical_block_number); - if depth > max_depth { - return ReconciliationStrategy::DepthLimitExceeded { depth, max_depth }; - } - - // No issues, continue building - ReconciliationStrategy::Continue - } -} - -#[cfg(test)] -mod tests { - use super::*; - - // ==================== FlashblockSequenceValidator Tests ==================== - - mod sequence_validator { - use super::*; - - #[test] - fn test_next_in_sequence() { - // Consecutive indices within the same block - assert_eq!( - FlashblockSequenceValidator::validate(100, 2, 100, 3), - SequenceValidationResult::NextInSequence - ); - assert_eq!( - FlashblockSequenceValidator::validate(100, 0, 100, 1), - SequenceValidationResult::NextInSequence - ); - assert_eq!( - FlashblockSequenceValidator::validate(100, 999, 100, 1000), - SequenceValidationResult::NextInSequence - ); - assert_eq!( - FlashblockSequenceValidator::validate(0, 0, 0, 1), - SequenceValidationResult::NextInSequence - ); - } - - #[test] - fn test_first_of_next_block() { - // Index 0 of the next block - assert_eq!( - FlashblockSequenceValidator::validate(0, 0, 1, 0), - SequenceValidationResult::FirstOfNextBlock - ); - 
assert_eq!( - FlashblockSequenceValidator::validate(100, 5, 101, 0), - SequenceValidationResult::FirstOfNextBlock - ); - assert_eq!( - FlashblockSequenceValidator::validate(100, 0, 101, 0), - SequenceValidationResult::FirstOfNextBlock - ); - assert_eq!( - FlashblockSequenceValidator::validate(999999, 10, 1000000, 0), - SequenceValidationResult::FirstOfNextBlock - ); - } - - #[test] - fn test_duplicate() { - // Same block and index - assert_eq!( - FlashblockSequenceValidator::validate(100, 5, 100, 5), - SequenceValidationResult::Duplicate - ); - assert_eq!( - FlashblockSequenceValidator::validate(100, 0, 100, 0), - SequenceValidationResult::Duplicate - ); - } - - #[test] - fn test_non_sequential_gap() { - // Non-consecutive indices within the same block - assert_eq!( - FlashblockSequenceValidator::validate(100, 2, 100, 4), - SequenceValidationResult::NonSequentialGap { expected: 3, actual: 4 } - ); - assert_eq!( - FlashblockSequenceValidator::validate(100, 0, 100, 10), - SequenceValidationResult::NonSequentialGap { expected: 1, actual: 10 } - ); - // Going backwards - assert_eq!( - FlashblockSequenceValidator::validate(100, 5, 100, 3), - SequenceValidationResult::NonSequentialGap { expected: 6, actual: 3 } - ); - } - - #[test] - fn test_invalid_new_block_index() { - // New block with non-zero index - assert_eq!( - FlashblockSequenceValidator::validate(100, 5, 101, 1), - SequenceValidationResult::InvalidNewBlockIndex { block_number: 101, index: 1 } - ); - // Block gap - assert_eq!( - FlashblockSequenceValidator::validate(100, 5, 105, 3), - SequenceValidationResult::InvalidNewBlockIndex { block_number: 105, index: 3 } - ); - assert_eq!( - FlashblockSequenceValidator::validate(100, 5, 102, 0), - SequenceValidationResult::InvalidNewBlockIndex { block_number: 102, index: 0 } - ); - // Going backwards in block number - assert_eq!( - FlashblockSequenceValidator::validate(100, 5, 99, 0), - SequenceValidationResult::InvalidNewBlockIndex { block_number: 99, index: 0 } - ); - 
} - } - - // ==================== ReorgDetector Tests ==================== - - mod reorg_detector { - use super::*; - - fn tracked( - block_hash: B256, - parent_hash: B256, - tx_hashes: Vec, - ) -> TrackedBlockFingerprint { - TrackedBlockFingerprint { block_number: 100, block_hash, parent_hash, tx_hashes } - } - - fn canonical( - block_hash: B256, - parent_hash: B256, - tx_hashes: Vec, - ) -> CanonicalBlockFingerprint { - CanonicalBlockFingerprint { block_number: 100, block_hash, parent_hash, tx_hashes } - } - - #[test] - fn test_no_reorg_identical_fingerprint() { - let hashes = vec![B256::repeat_byte(0x01), B256::repeat_byte(0x02)]; - let tracked = tracked(B256::repeat_byte(0xAA), B256::repeat_byte(0x11), hashes.clone()); - let canonical = canonical(B256::repeat_byte(0xAA), B256::repeat_byte(0x11), hashes); - assert_eq!(ReorgDetector::detect(&tracked, &canonical), ReorgDetectionResult::NoReorg); - } - - #[test] - fn test_reorg_on_parent_hash_mismatch_with_identical_txs() { - let hashes = vec![B256::repeat_byte(0x01), B256::repeat_byte(0x02)]; - let tracked = tracked(B256::repeat_byte(0xAA), B256::repeat_byte(0x11), hashes.clone()); - let canonical = canonical(B256::repeat_byte(0xAA), B256::repeat_byte(0x22), hashes); - - assert_eq!( - ReorgDetector::detect(&tracked, &canonical), - ReorgDetectionResult::ReorgDetected - ); - } - - #[test] - fn test_reorg_on_block_hash_mismatch_with_identical_txs() { - let hashes = vec![B256::repeat_byte(0x01), B256::repeat_byte(0x02)]; - let tracked = tracked(B256::repeat_byte(0xAA), B256::repeat_byte(0x11), hashes.clone()); - let canonical = canonical(B256::repeat_byte(0xBB), B256::repeat_byte(0x11), hashes); - - assert_eq!( - ReorgDetector::detect(&tracked, &canonical), - ReorgDetectionResult::ReorgDetected - ); - } - - #[test] - fn test_reorg_on_tx_hash_mismatch() { - let tracked = tracked( - B256::repeat_byte(0xAA), - B256::repeat_byte(0x11), - vec![B256::repeat_byte(0x01), B256::repeat_byte(0x02)], - ); - let canonical = 
canonical( - B256::repeat_byte(0xAA), - B256::repeat_byte(0x11), - vec![B256::repeat_byte(0x01), B256::repeat_byte(0x03)], - ); - - assert_eq!( - ReorgDetector::detect(&tracked, &canonical), - ReorgDetectionResult::ReorgDetected - ); - } - - #[test] - fn test_result_helpers() { - let no_reorg = ReorgDetectionResult::NoReorg; - assert!(no_reorg.is_no_reorg()); - assert!(!no_reorg.is_reorg()); - - let reorg = ReorgDetectionResult::ReorgDetected; - assert!(reorg.is_reorg()); - assert!(!reorg.is_no_reorg()); - } - } - - // ==================== CanonicalBlockReconciler Tests ==================== - - mod reconciler { - use super::*; - - #[test] - fn test_no_pending_state() { - assert_eq!( - CanonicalBlockReconciler::reconcile(None, None, 100, 10, false), - ReconciliationStrategy::NoPendingState - ); - assert_eq!( - CanonicalBlockReconciler::reconcile(Some(100), None, 100, 10, false), - ReconciliationStrategy::NoPendingState - ); - assert_eq!( - CanonicalBlockReconciler::reconcile(None, Some(100), 100, 10, false), - ReconciliationStrategy::NoPendingState - ); - } - - #[test] - fn test_catchup() { - // Canonical equals latest pending - assert_eq!( - CanonicalBlockReconciler::reconcile(Some(100), Some(105), 105, 10, false), - ReconciliationStrategy::CatchUp - ); - // Canonical passed latest pending - assert_eq!( - CanonicalBlockReconciler::reconcile(Some(100), Some(105), 110, 10, false), - ReconciliationStrategy::CatchUp - ); - // Single pending block - assert_eq!( - CanonicalBlockReconciler::reconcile(Some(100), Some(100), 100, 10, false), - ReconciliationStrategy::CatchUp - ); - // CatchUp takes priority over reorg - assert_eq!( - CanonicalBlockReconciler::reconcile(Some(100), Some(105), 105, 10, true), - ReconciliationStrategy::CatchUp - ); - } - - #[test] - fn test_handle_reorg() { - assert_eq!( - CanonicalBlockReconciler::reconcile(Some(100), Some(110), 102, 10, true), - ReconciliationStrategy::HandleReorg - ); - // Reorg takes priority over depth limit - assert_eq!( - 
CanonicalBlockReconciler::reconcile(Some(100), Some(130), 120, 10, true), - ReconciliationStrategy::HandleReorg - ); - } - - #[test] - fn test_depth_limit_exceeded() { - assert_eq!( - CanonicalBlockReconciler::reconcile(Some(100), Some(120), 115, 10, false), - ReconciliationStrategy::Continue - ); - assert_eq!( - CanonicalBlockReconciler::reconcile(Some(100), Some(105), 101, 0, false), - ReconciliationStrategy::DepthLimitExceeded { depth: 4, max_depth: 0 } - ); - assert_eq!( - CanonicalBlockReconciler::reconcile(Some(100), Some(200), 130, 64, false), - ReconciliationStrategy::DepthLimitExceeded { depth: 70, max_depth: 64 } - ); - } - - #[test] - fn test_continue() { - // Normal case: pending ahead of canonical - assert_eq!( - CanonicalBlockReconciler::reconcile(Some(100), Some(110), 105, 10, false), - ReconciliationStrategy::Continue - ); - // Exactly at depth limit - assert_eq!( - CanonicalBlockReconciler::reconcile(Some(100), Some(120), 110, 10, false), - ReconciliationStrategy::Continue - ); - // Canonical at earliest pending - assert_eq!( - CanonicalBlockReconciler::reconcile(Some(100), Some(105), 100, 10, false), - ReconciliationStrategy::Continue - ); - // Zero depth is OK with max_depth=0 - assert_eq!( - CanonicalBlockReconciler::reconcile(Some(100), Some(105), 100, 0, false), - ReconciliationStrategy::DepthLimitExceeded { depth: 5, max_depth: 0 } - ); - } - } -} diff --git a/crates/flashblocks/src/worker.rs b/crates/flashblocks/src/worker.rs deleted file mode 100644 index 957f0333..00000000 --- a/crates/flashblocks/src/worker.rs +++ /dev/null @@ -1,700 +0,0 @@ -use crate::{ - pending_state::PendingBlockState, - tx_cache::{CachedExecutionMeta, TransactionCache}, - PendingFlashBlock, -}; -use alloy_eips::{eip2718::WithEncoded, BlockNumberOrTag}; -use alloy_primitives::B256; -use op_alloy_rpc_types_engine::OpFlashblockPayloadBase; -use reth_chain_state::{ComputedTrieData, ExecutedBlock}; -use reth_errors::RethError; -use reth_evm::{ - execute::{ - 
BlockAssembler, BlockAssemblerInput, BlockBuilder, BlockBuilderOutcome, BlockExecutor, - }, - ConfigureEvm, Evm, -}; -use reth_execution_types::{BlockExecutionOutput, BlockExecutionResult}; -use reth_optimism_primitives::OpReceipt; -use reth_primitives_traits::{ - transaction::TxHashRef, AlloyBlockHeader, BlockTy, HeaderTy, NodePrimitives, ReceiptTy, - Recovered, RecoveredBlock, SealedHeader, -}; -use reth_revm::{ - cached::CachedReads, - database::StateProviderDatabase, - db::{states::bundle_state::BundleRetention, BundleState, State}, -}; -use reth_rpc_eth_types::{EthApiError, PendingBlock}; -use reth_storage_api::{ - noop::NoopProvider, BlockReaderIdExt, HashedPostStateProvider, StateProviderFactory, - StateRootProvider, -}; -use std::{ - sync::Arc, - time::{Duration, Instant}, -}; -use tracing::trace; - -/// The `FlashBlockBuilder` builds [`PendingBlock`] out of a sequence of transactions. -#[derive(Debug)] -pub(crate) struct FlashBlockBuilder { - evm_config: EvmConfig, - provider: Provider, -} - -impl FlashBlockBuilder { - pub(crate) const fn new(evm_config: EvmConfig, provider: Provider) -> Self { - Self { evm_config, provider } - } - - pub(crate) const fn provider(&self) -> &Provider { - &self.provider - } -} - -pub(crate) struct BuildArgs { - pub(crate) base: OpFlashblockPayloadBase, - pub(crate) transactions: I, - pub(crate) cached_state: Option<(B256, CachedReads)>, - pub(crate) last_flashblock_index: u64, - pub(crate) last_flashblock_hash: B256, - pub(crate) compute_state_root: bool, - /// Optional pending parent state for speculative building. - /// When set, allows building on top of a pending block that hasn't been - /// canonicalized yet. - pub(crate) pending_parent: Option>, -} - -/// Result of a flashblock build operation. -#[derive(Debug)] -pub(crate) struct BuildResult { - /// The built pending flashblock. - pub(crate) pending_flashblock: PendingFlashBlock, - /// Cached reads from this build. 
- pub(crate) cached_reads: CachedReads, - /// Pending state that can be used for building subsequent blocks. - pub(crate) pending_state: PendingBlockState, -} - -/// Cached prefix execution data used to resume canonical builds. -#[derive(Debug, Clone)] -struct CachedPrefixExecutionResult { - /// Number of leading transactions covered by cached execution. - cached_tx_count: usize, - /// Cumulative bundle state after executing the cached prefix. - bundle: BundleState, - /// Cached receipts for the prefix. - receipts: Vec, - /// Total gas used by the cached prefix. - gas_used: u64, - /// Total blob/DA gas used by the cached prefix. - blob_gas_used: u64, -} - -/// Receipt requirements for cache-resume flow. -pub trait FlashblockCachedReceipt: Clone { - /// Adds `gas_offset` to each receipt's `cumulative_gas_used`. - fn add_cumulative_gas_offset(receipts: &mut [Self], gas_offset: u64); -} - -impl FlashblockCachedReceipt for OpReceipt { - fn add_cumulative_gas_offset(receipts: &mut [Self], gas_offset: u64) { - if gas_offset == 0 { - return; - } - - for receipt in receipts { - let inner = receipt.as_receipt_mut(); - inner.cumulative_gas_used = inner.cumulative_gas_used.saturating_add(gas_offset); - } - } -} - -impl FlashBlockBuilder -where - N: NodePrimitives, - N::Receipt: FlashblockCachedReceipt, - EvmConfig: ConfigureEvm + Unpin>, - Provider: StateProviderFactory - + BlockReaderIdExt< - Header = HeaderTy, - Block = BlockTy, - Transaction = N::SignedTx, - Receipt = ReceiptTy, - > + Unpin, -{ - /// Returns the [`PendingFlashBlock`] made purely out of transactions and - /// [`OpFlashblockPayloadBase`] in `args`. - /// - /// This method supports two building modes: - /// 1. **Canonical mode**: Parent matches local tip - uses state from storage - /// 2. 
**Speculative mode**: Parent is a pending block - uses pending state - /// - /// When a `tx_cache` is provided and we're in canonical mode, the builder will - /// attempt to resume from cached state if the transaction list is a continuation - /// of what was previously executed. - /// - /// Returns `None` if: - /// - In canonical mode: flashblock doesn't attach to the latest header - /// - In speculative mode: no pending parent state provided - pub(crate) fn execute>>>( - &self, - mut args: BuildArgs, - tx_cache: Option<&mut TransactionCache>, - ) -> eyre::Result>> { - trace!(target: "flashblocks", "Attempting new pending block from flashblocks"); - - let latest = self - .provider - .latest_header()? - .ok_or(EthApiError::HeaderNotFound(BlockNumberOrTag::Latest.into()))?; - let latest_hash = latest.hash(); - - // Determine build mode: canonical (parent is local tip) or speculative (parent is pending) - let is_canonical = args.base.parent_hash == latest_hash; - let has_pending_parent = args.pending_parent.is_some(); - - if !is_canonical && !has_pending_parent { - trace!( - target: "flashblocks", - flashblock_parent = ?args.base.parent_hash, - local_latest = ?latest.num_hash(), - "Skipping non-consecutive flashblock (no pending parent available)" - ); - return Ok(None); - } - - // Collect transactions and extract hashes for cache lookup - let transactions: Vec<_> = args.transactions.into_iter().collect(); - let tx_hashes: Vec = transactions.iter().map(|tx| *tx.tx_hash()).collect(); - - // Get state provider and parent header context. - // For speculative builds, use the canonical anchor hash (not the pending parent hash) - // for storage reads, but execute with the pending parent's sealed header context. 
- let (state_provider, canonical_anchor, parent_header) = if is_canonical { - (self.provider.history_by_block_hash(latest.hash())?, latest.hash(), &latest) - } else { - // For speculative building, we need to use the canonical anchor - // and apply the pending state's bundle on top of it - let pending = args.pending_parent.as_ref().unwrap(); - let Some(parent_header) = pending.sealed_header.as_ref() else { - trace!( - target: "flashblocks", - pending_block_number = pending.block_number, - pending_block_hash = ?pending.block_hash, - "Skipping speculative build: pending parent header is unavailable" - ); - return Ok(None); - }; - if !is_consistent_speculative_parent_hashes( - args.base.parent_hash, - pending.block_hash, - parent_header.hash(), - ) { - trace!( - target: "flashblocks", - incoming_parent_hash = ?args.base.parent_hash, - pending_block_hash = ?pending.block_hash, - pending_sealed_hash = ?parent_header.hash(), - pending_block_number = pending.block_number, - "Skipping speculative build: inconsistent pending parent hashes" - ); - return Ok(None); - } - trace!( - target: "flashblocks", - pending_block_number = pending.block_number, - pending_block_hash = ?pending.block_hash, - canonical_anchor = ?pending.canonical_anchor_hash, - "Building speculatively on pending state" - ); - ( - self.provider.history_by_block_hash(pending.canonical_anchor_hash)?, - pending.canonical_anchor_hash, - parent_header, - ) - }; - - // Set up cached reads - let cache_key = if is_canonical { latest_hash } else { args.base.parent_hash }; - let mut request_cache = args - .cached_state - .take() - .filter(|(hash, _)| hash == &cache_key) - .map(|(_, state)| state) - .unwrap_or_else(|| { - // For speculative builds, use cached reads from pending parent - args.pending_parent.as_ref().map(|p| p.cached_reads.clone()).unwrap_or_default() - }); - - let cached_db = request_cache.as_db_mut(StateProviderDatabase::new(&state_provider)); - - // Check for resumable canonical execution state. 
- let canonical_parent_hash = args.base.parent_hash; - let cached_prefix = if is_canonical { - tx_cache.as_ref().and_then(|cache| { - cache - .get_resumable_state_with_execution_meta_for_parent( - args.base.block_number, - canonical_parent_hash, - &tx_hashes, - ) - .map( - |( - bundle, - receipts, - _requests, - gas_used, - blob_gas_used, - cached_tx_count, - )| { - trace!( - target: "flashblocks", - cached_tx_count, - total_txs = tx_hashes.len(), - "Cache hit (executing only uncached suffix)" - ); - CachedPrefixExecutionResult { - cached_tx_count, - bundle: bundle.clone(), - receipts: receipts.to_vec(), - gas_used, - blob_gas_used, - } - }, - ) - }) - } else { - None - }; - - // Build state with appropriate prestate - // - Speculative builds use pending parent prestate - // - Canonical cache-hit builds use cached prefix prestate - let mut state = if let Some(ref pending) = args.pending_parent { - State::builder() - .with_database(cached_db) - .with_bundle_prestate(pending.execution_outcome.state.clone()) - .with_bundle_update() - .build() - } else if let Some(ref cached_prefix) = cached_prefix { - State::builder() - .with_database(cached_db) - .with_bundle_prestate(cached_prefix.bundle.clone()) - .with_bundle_update() - .build() - } else { - State::builder().with_database(cached_db).with_bundle_update().build() - }; - - let (execution_result, block, hashed_state, bundle) = if let Some(cached_prefix) = - cached_prefix - { - // Cached prefix execution model: - // - The cached bundle prestate already includes pre-execution state changes - // (blockhash/beacon root updates, create2deployer), so we do NOT call - // apply_pre_execution_changes() again. - // - The only pre-execution effect we need is set_state_clear_flag, which configures EVM - // empty-account handling (OP Stack chains activate Spurious Dragon at genesis, so - // this is always true). - // - Suffix transactions execute against the warm prestate. 
- // - Post-execution (finish()) runs once on the suffix executor, producing correct - // results for the full block. For OP Stack post-merge, the - // post_block_balance_increments are empty (no block rewards, no ommers, no - // withdrawals passed), so finish() only seals execution state. - let attrs = args.base.clone().into(); - let evm_env = - self.evm_config.next_evm_env(parent_header, &attrs).map_err(RethError::other)?; - let execution_ctx = self - .evm_config - .context_for_next_block(parent_header, attrs) - .map_err(RethError::other)?; - - // The cached bundle prestate already includes pre-execution state changes. - // Only set the state clear flag (Spurious Dragon empty-account handling). - state.set_state_clear_flag(true); - let evm = self.evm_config.evm_with_env(&mut state, evm_env); - let mut executor = self.evm_config.create_executor(evm, execution_ctx.clone()); - - for tx in transactions.iter().skip(cached_prefix.cached_tx_count).cloned() { - let _gas_used = executor.execute_transaction(tx)?; - } - - let (evm, suffix_execution_result) = executor.finish()?; - let (db, evm_env) = evm.finish(); - db.merge_transitions(BundleRetention::Reverts); - - let execution_result = - Self::merge_cached_and_suffix_results(cached_prefix, suffix_execution_result); - - let (hashed_state, state_root) = if args.compute_state_root { - trace!(target: "flashblocks", "Computing block state root"); - let hashed_state = state_provider.hashed_post_state(&db.bundle_state); - let (state_root, _) = state_provider - .state_root_with_updates(hashed_state.clone()) - .map_err(RethError::other)?; - (hashed_state, state_root) - } else { - let noop_provider = NoopProvider::default(); - let hashed_state = noop_provider.hashed_post_state(&db.bundle_state); - let (state_root, _) = noop_provider - .state_root_with_updates(hashed_state.clone()) - .map_err(RethError::other)?; - (hashed_state, state_root) - }; - let bundle = db.take_bundle(); - - let (block_transactions, senders): (Vec<_>, 
Vec<_>) = - transactions.iter().map(|tx| tx.1.clone().into_parts()).unzip(); - let block = self - .evm_config - .block_assembler() - .assemble_block(BlockAssemblerInput::new( - evm_env, - execution_ctx, - parent_header, - block_transactions, - &execution_result, - &bundle, - &state_provider, - state_root, - )) - .map_err(RethError::other)?; - let block = RecoveredBlock::new_unhashed(block, senders); - - (execution_result, block, hashed_state, bundle) - } else { - let mut builder = self - .evm_config - .builder_for_next_block(&mut state, parent_header, args.base.clone().into()) - .map_err(RethError::other)?; - - builder.apply_pre_execution_changes()?; - - for tx in transactions { - let _gas_used = builder.execute_transaction(tx)?; - } - - let BlockBuilderOutcome { execution_result, block, hashed_state, .. } = - if args.compute_state_root { - trace!(target: "flashblocks", "Computing block state root"); - builder.finish(&state_provider)? - } else { - builder.finish(NoopProvider::default())? - }; - let bundle = state.take_bundle(); - - (execution_result, block, hashed_state, bundle) - }; - - // Update transaction cache if provided (only in canonical mode) - if let Some(cache) = tx_cache - && is_canonical - { - cache.update_with_execution_meta_for_parent( - args.base.block_number, - canonical_parent_hash, - tx_hashes, - bundle.clone(), - execution_result.receipts.clone(), - CachedExecutionMeta { - requests: execution_result.requests.clone(), - gas_used: execution_result.gas_used, - blob_gas_used: execution_result.blob_gas_used, - }, - ); - } - - let execution_outcome = BlockExecutionOutput { state: bundle, result: execution_result }; - let execution_outcome = Arc::new(execution_outcome); - - // Create pending state for subsequent builds. - // Use the locally built block hash for both parent matching and speculative - // execution context to avoid split-hash ambiguity. 
- let local_block_hash = block.hash(); - if local_block_hash != args.last_flashblock_hash { - trace!( - target: "flashblocks", - local_block_hash = ?local_block_hash, - sequencer_block_hash = ?args.last_flashblock_hash, - block_number = block.number(), - "Local block hash differs from sequencer-provided hash; speculative chaining will follow local hash" - ); - } - let sealed_header = SealedHeader::new(block.header().clone(), local_block_hash); - let pending_state = PendingBlockState::new( - local_block_hash, - block.number(), - args.base.parent_hash, - canonical_anchor, - execution_outcome.clone(), - request_cache.clone(), - ) - .with_sealed_header(sealed_header); - - let pending_block = PendingBlock::with_executed_block( - Instant::now() + Duration::from_secs(1), - ExecutedBlock::new( - block.into(), - execution_outcome, - ComputedTrieData::without_trie_input( - Arc::new(hashed_state.into_sorted()), - Arc::default(), - ), - ), - ); - let pending_flashblock = PendingFlashBlock::new( - pending_block, - canonical_anchor, - args.last_flashblock_index, - args.last_flashblock_hash, - args.compute_state_root, - ); - - Ok(Some(BuildResult { pending_flashblock, cached_reads: request_cache, pending_state })) - } - - fn merge_cached_and_suffix_results( - cached_prefix: CachedPrefixExecutionResult, - mut suffix_result: BlockExecutionResult, - ) -> BlockExecutionResult { - N::Receipt::add_cumulative_gas_offset(&mut suffix_result.receipts, cached_prefix.gas_used); - - let mut receipts = cached_prefix.receipts; - receipts.extend(suffix_result.receipts); - - // Use only suffix requests: the suffix executor's finish() produces - // post-execution requests from the complete block state (cached prestate + - // suffix changes). The cached prefix requests came from an intermediate - // state and must not be merged. 
- let requests = suffix_result.requests; - - BlockExecutionResult { - receipts, - requests, - gas_used: cached_prefix.gas_used.saturating_add(suffix_result.gas_used), - blob_gas_used: cached_prefix.blob_gas_used.saturating_add(suffix_result.blob_gas_used), - } - } -} - -#[inline] -fn is_consistent_speculative_parent_hashes( - incoming_parent_hash: B256, - pending_block_hash: B256, - pending_sealed_hash: B256, -) -> bool { - incoming_parent_hash == pending_block_hash && pending_block_hash == pending_sealed_hash -} - -impl Clone for FlashBlockBuilder { - fn clone(&self) -> Self { - Self { evm_config: self.evm_config.clone(), provider: self.provider.clone() } - } -} - -#[cfg(test)] -mod tests { - use super::{is_consistent_speculative_parent_hashes, BuildArgs, FlashBlockBuilder}; - use crate::{tx_cache::CachedExecutionMeta, TransactionCache}; - use alloy_consensus::{SignableTransaction, TxEip1559}; - use alloy_eips::eip2718::Encodable2718; - use alloy_network::TxSignerSync; - use alloy_primitives::{Address, StorageKey, StorageValue, TxKind, B256, U256}; - use alloy_signer_local::PrivateKeySigner; - use op_alloy_rpc_types_engine::OpFlashblockPayloadBase; - use op_revm::constants::L1_BLOCK_CONTRACT; - use reth_optimism_chainspec::OP_MAINNET; - use reth_optimism_evm::OpEvmConfig; - use reth_optimism_primitives::{OpBlock, OpPrimitives, OpTransactionSigned}; - use reth_primitives_traits::{AlloyBlockHeader, Recovered, SignerRecoverable}; - use reth_provider::test_utils::{ExtendedAccount, MockEthProvider}; - use reth_provider::ChainSpecProvider; - use reth_storage_api::BlockReaderIdExt; - use std::str::FromStr; - - fn signed_transfer_tx( - signer: &PrivateKeySigner, - nonce: u64, - recipient: Address, - ) -> OpTransactionSigned { - let mut tx = TxEip1559 { - chain_id: 10, // OP Mainnet chain id - nonce, - gas_limit: 100_000, - max_priority_fee_per_gas: 1_000_000_000, - max_fee_per_gas: 2_000_000_000, - to: TxKind::Call(recipient), - value: U256::from(1), - 
..Default::default() - }; - let signature = signer.sign_transaction_sync(&mut tx).expect("signing tx succeeds"); - tx.into_signed(signature).into() - } - - fn into_encoded_recovered( - tx: OpTransactionSigned, - signer: Address, - ) -> alloy_eips::eip2718::WithEncoded> { - let encoded = tx.encoded_2718(); - Recovered::new_unchecked(tx, signer).into_encoded_with(encoded) - } - - #[test] - fn speculative_parent_hashes_must_all_match() { - let h = B256::repeat_byte(0x11); - assert!(is_consistent_speculative_parent_hashes(h, h, h)); - } - - #[test] - fn speculative_parent_hashes_reject_any_mismatch() { - let incoming = B256::repeat_byte(0x11); - let pending = B256::repeat_byte(0x22); - let sealed = B256::repeat_byte(0x33); - - assert!(!is_consistent_speculative_parent_hashes(incoming, pending, sealed)); - assert!(!is_consistent_speculative_parent_hashes(incoming, incoming, sealed)); - assert!(!is_consistent_speculative_parent_hashes(incoming, pending, pending)); - } - - #[test] - fn canonical_build_reuses_cached_prefix_execution() { - let provider = MockEthProvider::::new().with_chain_spec(OP_MAINNET.clone()); - let genesis_hash = provider.chain_spec().genesis_hash(); - let genesis_block = - OpBlock::new(provider.chain_spec().genesis_header().clone(), Default::default()); - provider.add_block(genesis_hash, genesis_block); - - let recipient = Address::repeat_byte(0x22); - let signer = PrivateKeySigner::random(); - let tx_a = signed_transfer_tx(&signer, 0, recipient); - let tx_b = signed_transfer_tx(&signer, 1, recipient); - let tx_c = signed_transfer_tx(&signer, 2, recipient); - let signer = tx_a.recover_signer().expect("tx signer recovery succeeds"); - - provider.add_account(signer, ExtendedAccount::new(0, U256::from(1_000_000_000_000_000u64))); - provider.add_account(recipient, ExtendedAccount::new(0, U256::ZERO)); - provider.add_account( - L1_BLOCK_CONTRACT, - ExtendedAccount::new(1, U256::ZERO).extend_storage([ - (StorageKey::with_last_byte(1), 
StorageValue::from(1_000_000_000u64)), - (StorageKey::with_last_byte(5), StorageValue::from(188u64)), - (StorageKey::with_last_byte(6), StorageValue::from(684_000u64)), - ( - StorageKey::with_last_byte(3), - StorageValue::from_str( - "0x0000000000000000000000000000000000001db0000d27300000000000000005", - ) - .expect("valid L1 fee scalar storage value"), - ), - ]), - ); - - let latest = provider - .latest_header() - .expect("provider latest header query succeeds") - .expect("genesis header exists"); - - let base = OpFlashblockPayloadBase { - parent_hash: latest.hash(), - parent_beacon_block_root: B256::ZERO, - fee_recipient: Address::ZERO, - prev_randao: B256::repeat_byte(0x55), - block_number: latest.number() + 1, - gas_limit: 30_000_000, - timestamp: latest.timestamp() + 2, - extra_data: Default::default(), - base_fee_per_gas: U256::from(1_000_000_000u64), - }; - let base_parent_hash = base.parent_hash; - - let tx_a_hash = B256::from(*tx_a.tx_hash()); - let tx_b_hash = B256::from(*tx_b.tx_hash()); - let tx_c_hash = B256::from(*tx_c.tx_hash()); - - let tx_a = into_encoded_recovered(tx_a, signer); - let tx_b = into_encoded_recovered(tx_b, signer); - let tx_c = into_encoded_recovered(tx_c, signer); - - let evm_config = OpEvmConfig::optimism(OP_MAINNET.clone()); - let builder = FlashBlockBuilder::new(evm_config, provider); - let mut tx_cache = TransactionCache::::new(); - - let first = builder - .execute( - BuildArgs { - base: base.clone(), - transactions: vec![tx_a.clone(), tx_b.clone()], - cached_state: None, - last_flashblock_index: 0, - last_flashblock_hash: B256::repeat_byte(0xA0), - compute_state_root: false, - pending_parent: None, - }, - Some(&mut tx_cache), - ) - .expect("first build succeeds") - .expect("first build is canonical"); - - assert_eq!(first.pending_state.execution_outcome.result.receipts.len(), 2); - - let cached_hashes = vec![tx_a_hash, tx_b_hash]; - let (bundle, receipts, requests, gas_used, blob_gas_used, skip) = tx_cache - 
.get_resumable_state_with_execution_meta_for_parent( - base.block_number, - base_parent_hash, - &cached_hashes, - ) - .expect("cache should contain first build execution state"); - assert_eq!(skip, 2); - - let mut tampered_receipts = receipts.to_vec(); - tampered_receipts[0].as_receipt_mut().cumulative_gas_used = - tampered_receipts[0].as_receipt().cumulative_gas_used.saturating_add(17); - let expected_tampered_gas = tampered_receipts[0].as_receipt().cumulative_gas_used; - - tx_cache.update_with_execution_meta_for_parent( - base.block_number, - base_parent_hash, - cached_hashes, - bundle.clone(), - tampered_receipts, - CachedExecutionMeta { requests: requests.clone(), gas_used, blob_gas_used }, - ); - - let second_hashes = vec![tx_a_hash, tx_b_hash, tx_c_hash]; - let (_, _, _, _, _, skip) = tx_cache - .get_resumable_state_with_execution_meta_for_parent( - base.block_number, - base_parent_hash, - &second_hashes, - ) - .expect("second tx list should extend cached prefix"); - assert_eq!(skip, 2); - - let second = builder - .execute( - BuildArgs { - base, - transactions: vec![tx_a, tx_b, tx_c], - cached_state: None, - last_flashblock_index: 1, - last_flashblock_hash: B256::repeat_byte(0xA1), - compute_state_root: false, - pending_parent: None, - }, - Some(&mut tx_cache), - ) - .expect("second build succeeds") - .expect("second build is canonical"); - - let receipts = &second.pending_state.execution_outcome.result.receipts; - assert_eq!(receipts.len(), 3); - assert_eq!(receipts[0].as_receipt().cumulative_gas_used, expected_tampered_gas); - assert!( - receipts[2].as_receipt().cumulative_gas_used - > receipts[1].as_receipt().cumulative_gas_used - ); - } -} diff --git a/crates/flashblocks/src/ws/decoding.rs b/crates/flashblocks/src/ws/decoding.rs index 64d96dc5..91651ec9 100644 --- a/crates/flashblocks/src/ws/decoding.rs +++ b/crates/flashblocks/src/ws/decoding.rs @@ -1,26 +1,25 @@ -use crate::FlashBlock; use alloy_primitives::bytes::Bytes; use std::io; +use 
xlayer_builder::broadcast::XLayerFlashblockPayload; + /// A trait for decoding flashblocks from bytes. pub trait FlashBlockDecoder: Send + 'static { - /// Decodes `bytes` into a [`FlashBlock`]. - fn decode(&self, bytes: Bytes) -> eyre::Result; + /// Decodes `bytes` into an [`XLayerFlashblockPayload`]. + fn decode(&self, bytes: Bytes) -> eyre::Result; } /// Default implementation of the decoder. impl FlashBlockDecoder for () { - fn decode(&self, bytes: Bytes) -> eyre::Result { + fn decode(&self, bytes: Bytes) -> eyre::Result { decode_flashblock(bytes) } } -pub(crate) fn decode_flashblock(bytes: Bytes) -> eyre::Result { +pub(crate) fn decode_flashblock(bytes: Bytes) -> eyre::Result { let bytes = crate::ws::decoding::try_parse_message(bytes)?; - - let payload: FlashBlock = + let payload: XLayerFlashblockPayload = serde_json::from_slice(&bytes).map_err(|e| eyre::eyre!("failed to parse message: {e}"))?; - Ok(payload) } diff --git a/crates/flashblocks/src/ws/mod.rs b/crates/flashblocks/src/ws/mod.rs index 651d83c9..3a69d13f 100644 --- a/crates/flashblocks/src/ws/mod.rs +++ b/crates/flashblocks/src/ws/mod.rs @@ -1,6 +1,5 @@ -pub use stream::{WsConnect, WsFlashBlockStream}; - mod decoding; -pub use decoding::FlashBlockDecoder; - mod stream; + +pub use decoding::FlashBlockDecoder; +pub use stream::WsFlashBlockStream; diff --git a/crates/flashblocks/src/ws/stream.rs b/crates/flashblocks/src/ws/stream.rs index 18726219..c61c7163 100644 --- a/crates/flashblocks/src/ws/stream.rs +++ b/crates/flashblocks/src/ws/stream.rs @@ -1,4 +1,4 @@ -use crate::{ws::FlashBlockDecoder, FlashBlock}; +use crate::ws::FlashBlockDecoder; use futures_util::{ stream::{SplitSink, SplitStream}, FutureExt, Sink, Stream, StreamExt, @@ -18,7 +18,9 @@ use tokio_tungstenite::{ use tracing::debug; use url::Url; -/// An asynchronous stream of [`FlashBlock`] from a websocket connection. 
+use xlayer_builder::broadcast::XLayerFlashblockPayload; + +/// An asynchronous stream of [`XLayerFlashblockPayload`] from a websocket connection. /// /// The stream attempts to connect to a websocket URL and then decode each received item. /// @@ -48,7 +50,7 @@ impl WsFlashBlockStream { } } - /// Sets the [`FlashBlock`] decoder for the websocket stream. + /// Sets the [`XLayerFlashblockPayload`] decoder for the websocket stream. pub fn with_decoder(self, decoder: Box) -> Self { Self { decoder, ..self } } @@ -75,7 +77,7 @@ where S: Sink + Send + Unpin, C: WsConnect + Clone + Send + 'static + Unpin, { - type Item = eyre::Result; + type Item = eyre::Result; fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { let this = self.get_mut(); @@ -244,6 +246,7 @@ mod tests { use super::*; use alloy_primitives::bytes::Bytes; use brotli::enc::BrotliEncoderParams; + use op_alloy_rpc_types_engine::OpFlashblockPayload; use std::{future, iter}; use tokio_tungstenite::tungstenite::{ protocol::frame::{coding::CloseCode, Frame}, @@ -424,22 +427,22 @@ mod tests { fn to_json_message, F: Fn(B) -> Message>( wrapper_f: F, - ) -> impl Fn(&FlashBlock) -> Result + use { + ) -> impl Fn(&OpFlashblockPayload) -> Result + use { move |block| to_json_message_using(block, &wrapper_f) } - fn to_json_binary_message(block: &FlashBlock) -> Result { + fn to_json_binary_message(block: &OpFlashblockPayload) -> Result { to_json_message_using(block, Message::Binary) } fn to_json_message_using, F: Fn(B) -> Message>( - block: &FlashBlock, + block: &OpFlashblockPayload, wrapper_f: F, ) -> Result { Ok(wrapper_f(B::try_from(Bytes::from(serde_json::to_vec(block).unwrap())).unwrap())) } - fn to_brotli_message(block: &FlashBlock) -> Result { + fn to_brotli_message(block: &OpFlashblockPayload) -> Result { let json = serde_json::to_vec(block).unwrap(); let mut compressed = Vec::new(); brotli::BrotliCompress( @@ -451,7 +454,7 @@ mod tests { Ok(Message::Binary(Bytes::from(compressed))) } - fn 
flashblock() -> FlashBlock { + fn flashblock() -> OpFlashblockPayload { Default::default() } @@ -460,7 +463,7 @@ mod tests { #[test_case::test_case(to_brotli_message; "brotli")] #[tokio::test] async fn test_stream_decodes_messages_successfully( - to_message: impl Fn(&FlashBlock) -> Result, + to_message: impl Fn(&OpFlashblockPayload) -> Result, ) { let flashblocks = [flashblock()]; let connector = FakeConnector::from(flashblocks.iter().map(to_message)); @@ -469,7 +472,8 @@ mod tests { let actual_messages: Vec<_> = stream.take(1).map(Result::unwrap).collect().await; let expected_messages = flashblocks.to_vec(); - + let actual_messages: Vec<_> = actual_messages.iter().map(|m| &m.inner).collect(); + let expected_messages: Vec<_> = expected_messages.iter().collect(); assert_eq!(actual_messages, expected_messages); } @@ -486,7 +490,7 @@ mod tests { let actual_message = stream.next().await.expect("Binary message should not be ignored").unwrap(); - assert_eq!(actual_message, expected_message) + assert_eq!(actual_message.inner, expected_message) } #[tokio::test] diff --git a/crates/rpc/Cargo.toml b/crates/rpc/Cargo.toml index 311ac95b..febf835f 100644 --- a/crates/rpc/Cargo.toml +++ b/crates/rpc/Cargo.toml @@ -11,16 +11,43 @@ repository.workspace = true default = [] [dependencies] +xlayer-flashblocks.workspace = true + +# reth +reth-chain-state.workspace = true reth-optimism-rpc.workspace = true -reth-rpc.workspace = true +reth-optimism-primitives.workspace = true +reth-primitives-traits.workspace = true +reth-rpc-convert.workspace = true reth-rpc-eth-api.workspace = true +reth-rpc-eth-types.workspace = true +reth-rpc-server-types.workspace = true +reth-revm.workspace = true +reth-storage-api.workspace = true + +# alloy +alloy-consensus.workspace = true +alloy-eips.workspace = true +alloy-primitives.workspace = true +alloy-rpc-types-eth.workspace = true +alloy-serde.workspace = true + +# op +op-alloy-network.workspace = true +op-alloy-rpc-types.workspace = true +# rpc 
jsonrpsee.workspace = true -serde.workspace = true +jsonrpsee-types.workspace = true + +# misc +futures.workspace = true +tokio.workspace = true +tokio-stream.workspace = true +tracing.workspace = true [dev-dependencies] tokio = { workspace = true, features = ["rt", "macros"] } [lints] workspace = true - diff --git a/crates/rpc/src/default.rs b/crates/rpc/src/default.rs new file mode 100644 index 00000000..544d7ef3 --- /dev/null +++ b/crates/rpc/src/default.rs @@ -0,0 +1,75 @@ +use jsonrpsee::{ + core::{async_trait, RpcResult}, + proc_macros::rpc, +}; + +use reth_optimism_primitives::OpPrimitives; +use reth_optimism_rpc::SequencerClient; + +use xlayer_flashblocks::FlashblockStateCache; + +/// Trait for accessing sequencer client from backend +pub trait SequencerClientProvider { + /// Returns the sequencer client if available + fn sequencer_client(&self) -> Option<&SequencerClient>; +} + +/// X Layer default Eth JSON-RPC API extension trait. +#[rpc(server, namespace = "eth")] +pub trait DefaultRpcExtApi { + /// Returns boolean indicating if the node's flashblocks RPC functionality is enabled, + /// and if the flashblocks state cache is initialized. + /// + /// Returns `true` if the flashblocks state cache is not `None`, and when the flashblocks + /// state cache has been initialized (i.e. confirm height > 0), meaning the node is actively + /// receiving and caching flashblock data. + #[method(name = "flashblocksEnabled")] + async fn flashblocks_enabled(&self) -> RpcResult; +} + +/// X Layer default Eth JSON-RPC API extension implementation. +#[derive(Debug, Clone)] +pub struct DefaultRpcExt { + flashblocks_state: Option>, +} + +impl DefaultRpcExt { + /// Creates a new [`DefaultRpcExt`]. 
+ pub fn new(flashblocks_state: Option>) -> Self { + Self { flashblocks_state } + } +} + +#[async_trait] +impl DefaultRpcExtApiServer for DefaultRpcExt { + /// Handler for: `eth_flashblocksEnabled` + async fn flashblocks_enabled(&self) -> RpcResult { + Ok(self.flashblocks_state.as_ref().is_some_and(|cache| cache.get_confirm_height() > 0)) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_flashblocks_disabled_when_no_cache() { + let ext = DefaultRpcExt::new(None); + assert!(ext.flashblocks_state.is_none()); + } + + #[test] + fn test_flashblocks_disabled_at_zero_height() { + let cache = FlashblockStateCache::::new( + reth_chain_state::CanonicalInMemoryState::new( + Default::default(), + Default::default(), + None, + None, + None, + ), + ); + let ext = DefaultRpcExt::new(Some(cache)); + assert!(ext.flashblocks_state.as_ref().unwrap().get_confirm_height() == 0); + } +} diff --git a/crates/rpc/src/flashblocks.rs b/crates/rpc/src/flashblocks.rs new file mode 100644 index 00000000..20a2b25c --- /dev/null +++ b/crates/rpc/src/flashblocks.rs @@ -0,0 +1,606 @@ +use crate::helper::{ + to_block_receipts, to_rpc_block, to_rpc_transaction, to_rpc_transaction_from_bar_and_index, +}; +use futures::StreamExt; +use jsonrpsee::{ + core::{async_trait, RpcResult}, + proc_macros::rpc, +}; +use tokio_stream::wrappers::WatchStream; +use tracing::*; + +use alloy_eips::eip2718::Encodable2718; +use alloy_eips::{BlockId, BlockNumberOrTag}; +use alloy_primitives::{Address, Bytes, TxHash, B256, U256}; +use alloy_rpc_types_eth::{ + state::{EvmOverrides, StateOverride}, + BlockOverrides, Index, +}; +use alloy_serde::JsonStorageKey; +use op_alloy_network::Optimism; +use op_alloy_rpc_types::OpTransactionRequest; + +use reth_chain_state::CanonStateSubscriptions; +use reth_optimism_primitives::OpPrimitives; +use reth_optimism_rpc::{OpEthApi, OpEthApiError}; +use reth_primitives_traits::SealedHeaderFor; +use reth_revm::{database::StateProviderDatabase, db::State}; +use 
reth_rpc_convert::{RpcConvert, RpcTransaction}; +use reth_rpc_eth_api::{ + helpers::{estimate::EstimateCall, Call, FullEthApi, LoadState}, + EthApiServer, EthApiTypes, FromEvmError, RpcBlock, RpcNodeCore, RpcReceipt, +}; +use reth_rpc_eth_types::{block::convert_transaction_receipt, EthApiError}; +use reth_rpc_server_types::result::ToRpcResult; +use reth_storage_api::{StateProvider, StateProviderBox, StateProviderFactory}; + +use xlayer_flashblocks::FlashblockStateCache; + +/// Eth API override for flashblocks RPC integration. +#[cfg_attr(not(test), rpc(server, namespace = "eth"))] +#[cfg_attr(test, rpc(server, client, namespace = "eth"))] +pub trait FlashblocksEthApiOverride { + // ----------------- Block apis ----------------- + /// Returns the current block number as the maximum of the flashblocks confirm + /// height and the canonical chain height. + /// + /// Note: This may return a height ahead of the canonical chain when flashblocks + /// are actively being processed. Block data at this height is available through + /// the overridden `eth_getBlockByNumber` and `eth_getTransactionByHash` methods, + /// but non-overridden methods (e.g., `eth_getLogs`) only see canonical state. + #[method(name = "blockNumber")] + async fn block_number(&self) -> RpcResult; + + /// Returns block by number, with the flashblock state cache overlay support for pending and + /// confirmed blocks. + #[method(name = "getBlockByNumber")] + async fn block_by_number( + &self, + number: BlockNumberOrTag, + full: bool, + ) -> RpcResult>>; + + /// Returns block by block hash, with the flashblock state cache overlay support for pending + /// and confirmed blocks. + #[method(name = "getBlockByHash")] + async fn block_by_hash(&self, hash: B256, full: bool) -> RpcResult>>; + + /// Returns all the receipts in a block by block number, with the flashblock state cache + /// overlay support for pending and confirmed blocks. 
+ #[method(name = "getBlockReceipts")] + async fn block_receipts( + &self, + block_id: BlockNumberOrTag, + ) -> RpcResult>>>; + + /// Returns the number of transactions in a block by block number, with the flashblock state + /// cache overlay support for pending and confirmed blocks. + #[method(name = "getBlockTransactionCountByNumber")] + async fn block_transaction_count_by_number( + &self, + number: BlockNumberOrTag, + ) -> RpcResult>; + + /// Returns the number of transactions in a block by block hash, with the flashblock state + /// cache overlay support for pending and confirmed blocks. + #[method(name = "getBlockTransactionCountByHash")] + async fn block_transaction_count_by_hash(&self, hash: B256) -> RpcResult>; + + // ----------------- Transaction apis ----------------- + /// Returns the information about a transaction requested by transaction hash, with the + /// flashblock state cache overlay support for pending and confirmed blocks. + #[method(name = "getTransactionByHash")] + async fn transaction_by_hash( + &self, + hash: TxHash, + ) -> RpcResult>>; + + /// Returns the EIP-2718 encoded transaction if it exists, with the flashblock state cache + /// overlay support for pending and confirmed blocks. + #[method(name = "getRawTransactionByHash")] + async fn raw_transaction_by_hash(&self, hash: B256) -> RpcResult>; + + /// Returns the receipt of a transaction by transaction hash, with the flashblock state cache + /// overlay support for pending and confirmed blocks. + #[method(name = "getTransactionReceipt")] + async fn transaction_receipt(&self, hash: TxHash) -> RpcResult>>; + + /// Returns information about a transaction by block hash and transaction index position, + /// with the flashblock state cache overlay support for pending and confirmed blocks.
+ #[method(name = "getTransactionByBlockHashAndIndex")] + async fn transaction_by_block_hash_and_index( + &self, + block_hash: B256, + index: Index, + ) -> RpcResult>>; + + /// Returns information about a transaction by block number and transaction index position, + /// with the flashblock state cache overlay support for pending and confirmed blocks. + #[method(name = "getTransactionByBlockNumberAndIndex")] + async fn transaction_by_block_number_and_index( + &self, + number: BlockNumberOrTag, + index: Index, + ) -> RpcResult>>; + + /// Returns information about a raw transaction by block hash and transaction index position, + /// with the flashblock state cache overlay support for pending and confirmed blocks. + #[method(name = "getRawTransactionByBlockHashAndIndex")] + async fn raw_transaction_by_block_hash_and_index( + &self, + hash: B256, + index: Index, + ) -> RpcResult>; + + /// Returns information about a raw transaction by block number and transaction index position, + /// with the flashblock state cache overlay support for pending and confirmed blocks. + #[method(name = "getRawTransactionByBlockNumberAndIndex")] + async fn raw_transaction_by_block_number_and_index( + &self, + number: BlockNumberOrTag, + index: Index, + ) -> RpcResult>; + + /// Sends a signed transaction and awaits the transaction receipt, with the flashblock state + /// cache overlay support for pending and confirmed blocks. + /// + /// This will return a timeout error if the transaction isn't included within some time period. + #[method(name = "sendRawTransactionSync")] + async fn send_raw_transaction_sync(&self, bytes: Bytes) -> RpcResult>; + + // ----------------- State apis ----------------- + /// Executes a new message call immediately without creating a transaction on the block chain, + /// with the flashblock state cache overlay support for pending and confirmed block states. 
+ #[method(name = "call")] + async fn call( + &self, + transaction: OpTransactionRequest, + block_number: Option, + state_overrides: Option, + block_overrides: Option>, + ) -> RpcResult; + + /// Generates and returns an estimate of how much gas is necessary to allow the transaction to + /// complete, with the flashblock state cache overlay support for pending and confirmed block + /// states. + #[method(name = "estimateGas")] + async fn estimate_gas( + &self, + transaction: OpTransactionRequest, + block_number: Option, + overrides: Option, + ) -> RpcResult; + + /// Returns the balance of the account of given address, with the flashblock state cache + /// overlay support for pending and confirmed block states. + #[method(name = "getBalance")] + async fn balance(&self, address: Address, block_number: Option) -> RpcResult; + + /// Returns the number of transactions sent from an address at given block number, with the + /// flashblock state cache overlay support for pending and confirmed block states. + #[method(name = "getTransactionCount")] + async fn transaction_count( + &self, + address: Address, + block_number: Option, + ) -> RpcResult; + + /// Returns code at a given address at given block number, with the flashblock state cache + /// overlay support for pending and confirmed block states. + #[method(name = "getCode")] + async fn get_code(&self, address: Address, block_number: Option) -> RpcResult; + + /// Returns the value from a storage position at a given address, with the flashblock state + /// cache overlay support for pending and confirmed block states. + #[method(name = "getStorageAt")] + async fn storage_at( + &self, + address: Address, + slot: JsonStorageKey, + block_number: Option, + ) -> RpcResult; +} + +/// Extended Eth API with flashblocks cache overlay. 
+#[derive(Debug)] +pub struct FlashblocksEthApiExt { + eth_api: OpEthApi, + converter: Rpc, + flashblocks_state: FlashblockStateCache, +} + +impl FlashblocksEthApiExt { + /// Creates a new [`FlashblocksEthApiExt`]. + pub fn new( + eth_api: OpEthApi, + flashblocks_state: FlashblockStateCache, + ) -> Self + where + Rpc: Clone + RpcConvert, + { + let converter = eth_api.converter().clone(); + Self { eth_api, converter, flashblocks_state } + } +} + +#[async_trait] +impl FlashblocksEthApiOverrideServer for FlashblocksEthApiExt +where + N: RpcNodeCore, + Rpc: RpcConvert + + RpcConvert, + OpEthApi: FullEthApi + + EthApiTypes + + RpcNodeCore + + LoadState + + Call + + EstimateCall + + Send + + Sync + + 'static, +{ + // ----------------- Block apis ----------------- + /// Handler for: `eth_blockNumber` + async fn block_number(&self) -> RpcResult { + trace!(target: "rpc::eth", "Serving eth_blockNumber"); + let fb_height = self.flashblocks_state.get_confirm_height(); + // `EthApiServer::block_number` is synchronous (not async) + let canon_height: U256 = EthApiServer::block_number(&self.eth_api)?; + let fb_height = U256::from(fb_height); + Ok(std::cmp::max(fb_height, canon_height)) + } + + /// Handler for: `eth_getBlockByNumber` + async fn block_by_number( + &self, + number: BlockNumberOrTag, + full: bool, + ) -> RpcResult>> { + trace!(target: "rpc::eth", ?number, ?full, "Serving eth_getBlockByNumber"); + if let Some(bar) = self.flashblocks_state.get_rpc_block(number) { + return to_rpc_block(&bar, full, &self.converter).map(Some).map_err(Into::into); + } + EthApiServer::block_by_number(&self.eth_api, number, full).await + } + + /// Handler for: `eth_getBlockByHash` + async fn block_by_hash(&self, hash: B256, full: bool) -> RpcResult>> { + trace!(target: "rpc::eth", ?hash, ?full, "Serving eth_getBlockByHash"); + if let Some(bar) = self.flashblocks_state.get_block_by_hash(&hash) { + return to_rpc_block(&bar, full, &self.converter).map(Some).map_err(Into::into); + } + 
EthApiServer::block_by_hash(&self.eth_api, hash, full).await + } + + /// Handler for: `eth_getBlockReceipts` + async fn block_receipts( + &self, + block_id: BlockNumberOrTag, + ) -> RpcResult>>> { + trace!(target: "rpc::eth", ?block_id, "Serving eth_getBlockReceipts"); + if let Some(bar) = self.flashblocks_state.get_rpc_block(block_id) { + return to_block_receipts(&bar, &self.converter).map(Some).map_err(Into::into); + } + EthApiServer::block_receipts(&self.eth_api, block_id.into()).await + } + + /// Handler for: `eth_getBlockTransactionCountByNumber` + async fn block_transaction_count_by_number( + &self, + number: BlockNumberOrTag, + ) -> RpcResult> { + trace!(target: "rpc::eth", ?number, "Serving eth_getBlockTransactionCountByNumber"); + if let Some(bar) = self.flashblocks_state.get_rpc_block(number) { + let count = bar.block.body().transactions.len(); + return Ok(Some(U256::from(count))); + } + EthApiServer::block_transaction_count_by_number(&self.eth_api, number).await + } + + /// Handler for: `eth_getBlockTransactionCountByHash` + async fn block_transaction_count_by_hash(&self, hash: B256) -> RpcResult> { + trace!(target: "rpc::eth", ?hash, "Serving eth_getBlockTransactionCountByHash"); + if let Some(bar) = self.flashblocks_state.get_block_by_hash(&hash) { + let count = bar.block.body().transactions.len(); + return Ok(Some(U256::from(count))); + } + EthApiServer::block_transaction_count_by_hash(&self.eth_api, hash).await + } + + // ----------------- Transaction apis ----------------- + /// Handler for: `eth_getTransactionByHash` + async fn transaction_by_hash( + &self, + hash: TxHash, + ) -> RpcResult>> { + trace!(target: "rpc::eth", ?hash, "Serving eth_getTransactionByHash"); + if let Some((info, bar)) = self.flashblocks_state.get_tx_info(&hash) { + return Ok(Some(to_rpc_transaction(&info, &bar, &self.converter)?)); + } + EthApiServer::transaction_by_hash(&self.eth_api, hash).await + } + + /// Handler for: `eth_getRawTransactionByHash` + async fn 
raw_transaction_by_hash(&self, hash: B256) -> RpcResult> { + trace!(target: "rpc::eth", ?hash, "Serving eth_getRawTransactionByHash"); + if let Some((info, _)) = self.flashblocks_state.get_tx_info(&hash) { + return Ok(Some(info.tx.encoded_2718().into())); + } + EthApiServer::raw_transaction_by_hash(&self.eth_api, hash).await + } + + /// Handler for: `eth_getTransactionReceipt` + async fn transaction_receipt(&self, hash: TxHash) -> RpcResult>> { + trace!(target: "rpc::eth", ?hash, "Serving eth_getTransactionReceipt"); + if let Some((_, bar)) = self.flashblocks_state.get_tx_info(&hash) + && let Some(Ok(receipt)) = + bar.find_and_convert_transaction_receipt(hash, &self.converter) + { + return Ok(Some(receipt)); + } + EthApiServer::transaction_receipt(&self.eth_api, hash).await + } + + /// Handler for: `eth_getTransactionByBlockHashAndIndex` + async fn transaction_by_block_hash_and_index( + &self, + block_hash: B256, + index: Index, + ) -> RpcResult>> { + trace!(target: "rpc::eth", ?block_hash, ?index, "Serving eth_getTransactionByBlockHashAndIndex"); + if let Some(bar) = self.flashblocks_state.get_block_by_hash(&block_hash) { + return to_rpc_transaction_from_bar_and_index(&bar, index.into(), &self.converter) + .map_err(Into::into); + } + EthApiServer::transaction_by_block_hash_and_index(&self.eth_api, block_hash, index).await + } + + /// Handler for: `eth_getTransactionByBlockNumberAndIndex` + async fn transaction_by_block_number_and_index( + &self, + number: BlockNumberOrTag, + index: Index, + ) -> RpcResult>> { + trace!(target: "rpc::eth", ?number, ?index, "Serving eth_getTransactionByBlockNumberAndIndex"); + if let Some(bar) = self.flashblocks_state.get_rpc_block(number) { + return to_rpc_transaction_from_bar_and_index(&bar, index.into(), &self.converter) + .map_err(Into::into); + } + EthApiServer::transaction_by_block_number_and_index(&self.eth_api, number, index).await + } + + /// Handler for: `eth_getRawTransactionByBlockHashAndIndex` + async fn 
raw_transaction_by_block_hash_and_index( + &self, + hash: B256, + index: Index, + ) -> RpcResult> { + trace!(target: "rpc::eth", ?hash, ?index, "Serving eth_getRawTransactionByBlockHashAndIndex"); + let idx: usize = index.into(); + if let Some(bar) = self.flashblocks_state.get_block_by_hash(&hash) + && let Some(tx) = bar.block.body().transactions.get(idx) + { + return Ok(Some(tx.encoded_2718().into())); + } + EthApiServer::raw_transaction_by_block_hash_and_index(&self.eth_api, hash, index).await + } + + /// Handler for: `eth_getRawTransactionByBlockNumberAndIndex` + async fn raw_transaction_by_block_number_and_index( + &self, + number: BlockNumberOrTag, + index: Index, + ) -> RpcResult> { + trace!(target: "rpc::eth", ?number, ?index, "Serving eth_getRawTransactionByBlockNumberAndIndex"); + let idx: usize = index.into(); + if let Some(bar) = self.flashblocks_state.get_rpc_block(number) + && let Some(tx) = bar.block.body().transactions.get(idx) + { + return Ok(Some(tx.encoded_2718().into())); + } + EthApiServer::raw_transaction_by_block_number_and_index(&self.eth_api, number, index).await + } + + /// Handler for: `eth_sendRawTransactionSync` + async fn send_raw_transaction_sync(&self, tx: Bytes) -> RpcResult> { + use reth_rpc_eth_api::helpers::EthTransactions; + + trace!(target: "rpc::eth", ?tx, "Serving eth_sendRawTransactionSync"); + let timeout_duration = self.eth_api.send_raw_transaction_sync_timeout(); + let hash = as EthTransactions>::send_raw_transaction(&self.eth_api, tx) + .await + .map_err(|e| -> jsonrpsee_types::error::ErrorObject<'static> { e.into() })?; + let converter = &self.converter; + + let mut canonical_stream = self.eth_api.provider().canonical_state_stream(); + let mut flashblock_stream = + WatchStream::new(self.flashblocks_state.subscribe_pending_sequence()); + + tokio::time::timeout(timeout_duration, async { + loop { + tokio::select! 
{ + biased; + // check if the tx was preconfirmed in the latest flashblocks pending sequence + pending = flashblock_stream.next() => { + if let Some(pending_sequence) = pending.flatten() { + let bar = pending_sequence.get_block_and_receipts(); + if let Some(receipt) = + bar.find_and_convert_transaction_receipt(hash, converter) + { + return receipt.map_err(Into::into); + } + } + } + // Listen for regular canonical block updates for inclusion + canonical_notification = canonical_stream.next() => { + if let Some(notification) = canonical_notification { + let chain = notification.committed(); + if let Some((block, indexed_tx, receipt, all_receipts)) = + chain.find_transaction_and_receipt_by_hash(hash) + && let Some(receipt) = convert_transaction_receipt( + block, + all_receipts, + indexed_tx, + receipt, + converter, + ) + .transpose() + .map_err(|e: OpEthApiError| -> jsonrpsee_types::error::ErrorObject<'static> { e.into() })? + { + return Ok(receipt); + } + } else { + // Canonical stream ended + break; + } + } + } + } + Err(EthApiError::TransactionConfirmationTimeout { hash, duration: timeout_duration } + .into()) + }) + .await + .unwrap_or_else(|_elapsed| { + Err(EthApiError::TransactionConfirmationTimeout { hash, duration: timeout_duration } + .into()) + }) + } + + // ----------------- State apis ----------------- + /// Handler for: `eth_call` + async fn call( + &self, + transaction: OpTransactionRequest, + block_number: Option, + state_overrides: Option, + block_overrides: Option>, + ) -> RpcResult { + trace!(target: "rpc::eth", ?transaction, ?block_number, ?state_overrides, ?block_overrides, "Serving eth_call"); + if let Some((state, header)) = self.get_flashblock_state_provider_by_id(block_number)? 
{ + let evm_env = self + .eth_api + .evm_env_for_header(&header) + .map_err(|e| -> jsonrpsee_types::error::ErrorObject<'static> { e.into() })?; + let mut db = State::builder().with_database(StateProviderDatabase::new(state)).build(); + let (evm_env, tx_env) = self + .eth_api + .prepare_call_env( + evm_env, + transaction, + &mut db, + EvmOverrides::new(state_overrides, block_overrides), + ) + .map_err(|e| -> jsonrpsee_types::error::ErrorObject<'static> { e.into() })?; + let res = self + .eth_api + .transact(db, evm_env, tx_env) + .map_err(|e| -> jsonrpsee_types::error::ErrorObject<'static> { e.into() })?; + return >::ensure_success(res.result) + .map_err(Into::into); + } + EthApiServer::call( + &self.eth_api, + transaction, + block_number, + state_overrides, + block_overrides, + ) + .await + } + + /// Handler for: `eth_estimateGas` + async fn estimate_gas( + &self, + transaction: OpTransactionRequest, + block_number: Option, + overrides: Option, + ) -> RpcResult { + trace!(target: "rpc::eth", ?transaction, ?block_number, "Serving eth_estimateGas"); + if let Some((state, header)) = self.get_flashblock_state_provider_by_id(block_number)? { + let evm_env = self + .eth_api + .evm_env_for_header(&header) + .map_err(|e| -> jsonrpsee_types::error::ErrorObject<'static> { e.into() })?; + return self + .eth_api + .estimate_gas_with(evm_env, transaction, state, overrides) + .map_err(Into::into); + } + EthApiServer::estimate_gas(&self.eth_api, transaction, block_number, overrides).await + } + + /// Handler for: `eth_getBalance` + async fn balance(&self, address: Address, block_number: Option) -> RpcResult { + trace!(target: "rpc::eth", ?address, ?block_number, "Serving eth_getBalance"); + if let Some((state, _)) = self.get_flashblock_state_provider_by_id(block_number)? 
{ + return Ok(state.account_balance(&address).to_rpc_result()?.unwrap_or_default()); + } + EthApiServer::balance(&self.eth_api, address, block_number).await + } + + /// Handler for: `eth_getTransactionCount` + async fn transaction_count( + &self, + address: Address, + block_number: Option, + ) -> RpcResult { + trace!(target: "rpc::eth", ?address, ?block_number, "Serving eth_getTransactionCount"); + if let Some((state, _)) = self.get_flashblock_state_provider_by_id(block_number)? { + return Ok(U256::from( + state.account_nonce(&address).to_rpc_result()?.unwrap_or_default(), + )); + } + EthApiServer::transaction_count(&self.eth_api, address, block_number).await + } + + /// Handler for: `eth_getCode` + async fn get_code(&self, address: Address, block_number: Option) -> RpcResult { + trace!(target: "rpc::eth", ?address, ?block_number, "Serving eth_getCode"); + if let Some((state, _)) = self.get_flashblock_state_provider_by_id(block_number)? { + return Ok(state + .account_code(&address) + .to_rpc_result()? + .map(|code| code.original_bytes()) + .unwrap_or_default()); + } + EthApiServer::get_code(&self.eth_api, address, block_number).await + } + + /// Handler for: `eth_getStorageAt` + async fn storage_at( + &self, + address: Address, + slot: JsonStorageKey, + block_number: Option, + ) -> RpcResult { + trace!(target: "rpc::eth", ?address, ?slot, ?block_number, "Serving eth_getStorageAt"); + if let Some((state, _)) = self.get_flashblock_state_provider_by_id(block_number)? { + return Ok(B256::new( + state + .storage(address, slot.as_b256()) + .to_rpc_result()? + .unwrap_or_default() + .to_be_bytes(), + )); + } + EthApiServer::storage_at(&self.eth_api, address, slot, block_number).await + } +} + +impl FlashblocksEthApiExt +where + N: RpcNodeCore, + Rpc: RpcConvert + + RpcConvert, + OpEthApi: RpcNodeCore + Send + Sync + 'static, +{ + /// Returns a `StateProvider` overlaying flashblock execution state on top of canonical state + /// for the given block ID. 
Returns `None` if the block is not in the flashblocks cache. + fn get_flashblock_state_provider_by_id( + &self, + block_id: Option, + ) -> RpcResult)>> { + let canon_state = self.eth_api.provider().latest().to_rpc_result()?; + Ok(self.flashblocks_state.get_state_provider_by_id(block_id, canon_state)) + } +} diff --git a/crates/rpc/src/helper.rs b/crates/rpc/src/helper.rs new file mode 100644 index 00000000..bc7c9d41 --- /dev/null +++ b/crates/rpc/src/helper.rs @@ -0,0 +1,131 @@ +use alloy_consensus::{BlockHeader, TxReceipt}; +use alloy_primitives::B256; +use alloy_rpc_types_eth::TransactionInfo; +use op_alloy_network::Optimism; + +use reth_optimism_primitives::OpPrimitives; +use reth_primitives_traits::{Recovered, SignedTransaction, TransactionMeta}; +use reth_rpc_convert::{transaction::ConvertReceiptInput, RpcConvert, RpcTransaction}; +use reth_rpc_eth_api::{RpcBlock, RpcReceipt}; +use reth_rpc_eth_types::block::BlockAndReceipts; + +use xlayer_flashblocks::CachedTxInfo; + +/// Converter for `TransactionMeta` +pub(crate) fn build_tx_meta( + bar: &BlockAndReceipts, + tx_hash: B256, + index: u64, +) -> TransactionMeta { + TransactionMeta { + tx_hash, + index, + block_hash: bar.block.hash(), + block_number: bar.block.number(), + base_fee: bar.block.base_fee_per_gas(), + excess_blob_gas: bar.block.excess_blob_gas(), + timestamp: bar.block.timestamp(), + } +} + +/// Converter for `TransactionInfo` +pub(crate) fn build_tx_info( + bar: &BlockAndReceipts, + tx_hash: B256, + index: u64, +) -> TransactionInfo { + TransactionInfo { + hash: Some(tx_hash), + index: Some(index), + block_hash: Some(bar.block.hash()), + block_number: Some(bar.block.number()), + base_fee: bar.block.base_fee_per_gas(), + } +} + +/// Converts a `BlockAndReceipts` into an RPC block. 
+pub(crate) fn to_rpc_block( + bar: &BlockAndReceipts, + full: bool, + converter: &Rpc, +) -> Result, Rpc::Error> +where + Rpc: RpcConvert, +{ + bar.block.clone_into_rpc_block( + full.into(), + |tx, tx_info| converter.fill(tx, tx_info), + |header, size| converter.convert_header(header, size), + ) +} + +/// Converts all receipts from a `BlockAndReceipts` into RPC receipts. +pub(crate) fn to_block_receipts( + bar: &BlockAndReceipts, + converter: &Rpc, +) -> Result>, Rpc::Error> +where + Rpc: RpcConvert, +{ + let txs = bar.block.body().transactions(); + let senders = bar.block.senders(); + let receipts = bar.receipts.as_ref(); + + let mut prev_cumulative_gas = 0u64; + let mut next_log_index = 0usize; + + let inputs = txs + .zip(senders.iter()) + .zip(receipts.iter()) + .enumerate() + .map(|(idx, ((tx, sender), receipt))| { + let gas_used = receipt.cumulative_gas_used() - prev_cumulative_gas; + prev_cumulative_gas = receipt.cumulative_gas_used(); + let logs_len = receipt.logs().len(); + + let meta = build_tx_meta(bar, tx.tx_hash(), idx as u64); + let input = ConvertReceiptInput { + tx: Recovered::new_unchecked(tx, *sender), + gas_used, + next_log_index, + meta, + receipt: receipt.clone(), + }; + + next_log_index += logs_len; + + input + }) + .collect::>(); + + converter.convert_receipts_with_block(inputs, bar.sealed_block()) +} + +/// Converts a `CachedTxInfo` and `BlockAndReceipts` into an RPC transaction. +pub(crate) fn to_rpc_transaction( + info: &CachedTxInfo, + bar: &BlockAndReceipts, + converter: &Rpc, +) -> Result, Rpc::Error> +where + Rpc: RpcConvert, +{ + let tx_info = build_tx_info(bar, info.tx.tx_hash(), info.tx_index); + converter.fill(info.tx.clone().try_into_recovered().expect("valid cached tx"), tx_info) +} + +/// Converts a `BlockAndReceipts` and transaction index into an RPC transaction. 
+pub(crate) fn to_rpc_transaction_from_bar_and_index( + bar: &BlockAndReceipts, + index: usize, + converter: &Rpc, +) -> Result>, Rpc::Error> +where + Rpc: RpcConvert, +{ + if let Some((signer, tx)) = bar.block.transactions_with_sender().nth(index) { + let tx_info = build_tx_info(bar, tx.tx_hash(), index as u64); + return Ok(Some(converter.fill(tx.clone().with_signer(*signer), tx_info)?)); + } + Ok(None) +} diff --git a/crates/rpc/src/lib.rs b/crates/rpc/src/lib.rs index bb620fb6..ea567160 100644 --- a/crates/rpc/src/lib.rs +++ b/crates/rpc/src/lib.rs @@ -1,18 +1,17 @@ #![cfg_attr(not(test), warn(unused_crate_dependencies))] #![cfg_attr(docsrs, feature(doc_cfg))] -pub mod xlayer_ext; +pub mod default; +pub mod flashblocks; +pub mod helper; -use std::time::Instant; -// Re-export for convenience -pub use xlayer_ext::{ - PendingFlashBlockProvider, SequencerClientProvider, XlayerRpcExt, XlayerRpcExtApiServer, -}; +pub use default::{DefaultRpcExt, DefaultRpcExtApiServer, SequencerClientProvider}; +pub use flashblocks::{FlashblocksEthApiExt, FlashblocksEthApiOverrideServer}; -// Implement SequencerClientProvider for OpEthApi use reth_optimism_rpc::{OpEthApi, SequencerClient}; use reth_rpc_eth_api::{RpcConvert, RpcNodeCore}; +// Implement `SequencerClientProvider` for `OpEthApi` impl SequencerClientProvider for OpEthApi where N: RpcNodeCore, @@ -22,17 +21,3 @@ where self.sequencer_client() } } - -impl PendingFlashBlockProvider for OpEthApi -where - N: RpcNodeCore, - Rpc: RpcConvert, -{ - fn has_pending_flashblock(&self) -> bool { - self.pending_block_rx().is_some_and(|rx| { - rx.borrow() - .as_ref() - .is_some_and(|pending_flashblock| Instant::now() < pending_flashblock.expires_at) - }) - } -} diff --git a/crates/rpc/src/xlayer_ext.rs b/crates/rpc/src/xlayer_ext.rs deleted file mode 100644 index 3eaec3de..00000000 --- a/crates/rpc/src/xlayer_ext.rs +++ /dev/null @@ -1,105 +0,0 @@ -use std::sync::Arc; - -use jsonrpsee::{ - core::{async_trait, RpcResult}, - 
proc_macros::rpc, -}; - -use reth_optimism_rpc::SequencerClient; -use reth_rpc::RpcTypes; - -/// Trait for accessing sequencer client from backend -pub trait SequencerClientProvider { - /// Returns the sequencer client if available - fn sequencer_client(&self) -> Option<&SequencerClient>; -} - -/// Trait for checking if pending block (flashblocks) is enabled -pub trait PendingFlashBlockProvider { - /// Returns true if pending block receiver is available and has actual pending block data (flashblocks enabled) - fn has_pending_flashblock(&self) -> bool; -} - -/// XLayer-specific RPC API trait -#[rpc(server, namespace = "eth", server_bounds( - Net: 'static + RpcTypes, - ::TransactionRequest: - serde::de::DeserializeOwned + serde::Serialize -))] -pub trait XlayerRpcExtApi { - /// Returns boolean indicating if the node's flashblocks functionality is enabled and working. - #[method(name = "flashblocksEnabled")] - async fn flashblocks_enabled(&self) -> RpcResult; -} - -/// XLayer RPC extension implementation -#[derive(Debug)] -pub struct XlayerRpcExt { - pub backend: Arc, -} - -#[async_trait] -impl XlayerRpcExtApiServer for XlayerRpcExt -where - T: PendingFlashBlockProvider + Send + Sync + 'static, - Net: RpcTypes + Send + Sync + 'static, -{ - async fn flashblocks_enabled(&self) -> RpcResult { - Ok(self.backend.has_pending_flashblock()) - } -} - -#[cfg(test)] -mod tests { - use super::PendingFlashBlockProvider; - use std::time::{Duration, Instant}; - use tokio::sync::watch; - - struct MockPendingFlashBlock { - expires_at: Instant, - } - - struct MockPendingFlashBlockProvider { - rx: Option>>, - } - - impl PendingFlashBlockProvider for MockPendingFlashBlockProvider { - fn has_pending_flashblock(&self) -> bool { - self.rx.as_ref().is_some_and(|rx| { - rx.borrow().as_ref().is_some_and(|pending_flashblock| { - Instant::now() < pending_flashblock.expires_at - }) - }) - } - } - - #[test] - fn test_no_receiver_returns_false() { - let provider = MockPendingFlashBlockProvider { 
rx: None }; - assert!(!provider.has_pending_flashblock()); - } - - #[test] - fn test_empty_receiver_returns_false() { - let (_tx, rx) = watch::channel(None); - let provider = MockPendingFlashBlockProvider { rx: Some(rx) }; - assert!(!provider.has_pending_flashblock()); - } - - #[test] - fn test_expired_flashblock_returns_false() { - let expired = - MockPendingFlashBlock { expires_at: Instant::now() - Duration::from_secs(60) }; - let (_tx, rx) = watch::channel(Some(expired)); - let provider = MockPendingFlashBlockProvider { rx: Some(rx) }; - assert!(!provider.has_pending_flashblock()); - } - - #[test] - fn test_valid_flashblock_returns_true() { - let valid = MockPendingFlashBlock { expires_at: Instant::now() + Duration::from_secs(60) }; - let (_tx, rx) = watch::channel(Some(valid)); - let provider = MockPendingFlashBlockProvider { rx: Some(rx) }; - assert!(provider.has_pending_flashblock()); - } -}