From 5f92f5c49d4c2c24906858f8b782748b9663a487 Mon Sep 17 00:00:00 2001
From: Leo
Date: Thu, 16 Oct 2025 18:29:39 +0900
Subject: [PATCH 001/122] feat: add backon library

---
 Cargo.lock | 24 ++++++++++++++++++++++++
 Cargo.toml |  2 ++
 2 files changed, 26 insertions(+)

diff --git a/Cargo.lock b/Cargo.lock
index 3b0e4753..3d612261 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -1034,6 +1034,17 @@ version = "1.5.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "c08606f8c3cbf4ce6ec8e28fb0014a2c086708fe954eaa885384a6165172e7e8"
 
+[[package]]
+name = "backon"
+version = "1.5.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "592277618714fbcecda9a02ba7a8781f319d26532a88553bbacc77ba5d2b3a8d"
+dependencies = [
+ "fastrand",
+ "gloo-timers",
+ "tokio",
+]
+
 [[package]]
 name = "backtrace"
 version = "0.3.75"
@@ -1590,6 +1601,7 @@ dependencies = [
  "alloy-node-bindings",
  "anyhow",
  "async-trait",
+ "backon",
  "chrono",
  "serde",
  "serde_json",
@@ -1839,6 +1851,18 @@ version = "0.3.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "0cc23270f6e1808e30a928bdc84dea0b9b4136a8bc82338574f23baf47bbd280"
 
+[[package]]
+name = "gloo-timers"
+version = "0.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "bbb143cf96099802033e0d4f4963b19fd2e0b728bcf076cd9cf7f6634f092994"
+dependencies = [
+ "futures-channel",
+ "futures-core",
+ "js-sys",
+ "wasm-bindgen",
+]
+
 [[package]]
 name = "group"
 version = "0.13.0"
diff --git a/Cargo.toml b/Cargo.toml
index a27bb890..59b907d0 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -32,6 +32,7 @@ tokio-stream = "0.1.17"
 tracing = "0.1"
 tracing-subscriber = { version = "0.3", features = ["fmt", "env-filter"] }
 hex = "0.4"
+backon = "1.5.2"
 
 [package]
 name = "event-scanner"
@@ -65,6 +66,7 @@ alloy-node-bindings.workspace = true
 tokio-stream.workspace = true
 tracing.workspace = true
 tracing-subscriber.workspace = true
+backon.workspace = true
 
 [lints]
 workspace = true
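`backon` is the retry engine the rest of this series builds on. A minimal, self-contained sketch of the pattern it enables — the fetch function and error type here are illustrative stand-ins, not code from this repo; the `retry`/`sleep` combinators are the same ones the later patches use:

```rust
use std::time::Duration;

use backon::{ExponentialBuilder, Retryable};

// Stand-in for an RPC call that can fail transiently.
async fn fetch_latest_block() -> Result<u64, std::io::Error> {
    Ok(42)
}

#[tokio::main]
async fn main() -> Result<(), std::io::Error> {
    // Retry up to 5 times with exponential backoff starting at 1s,
    // sleeping on the tokio timer between attempts.
    let strategy = ExponentialBuilder::default()
        .with_max_times(5)
        .with_min_delay(Duration::from_secs(1));

    let block = fetch_latest_block.retry(strategy).sleep(tokio::time::sleep).await?;
    println!("latest block: {block}");
    Ok(())
}
```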
From 2ad65163818913773bd4de8734fa6af3b81be8c8 Mon Sep 17 00:00:00 2001
From: Leo
Date: Thu, 16 Oct 2025 21:11:26 +0900
Subject: [PATCH 002/122] feat: add provider wrapper doc

---
 src/lib.rs           |   8 +++
 src/safe_provider.rs | 150 +++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 158 insertions(+)
 create mode 100644 src/safe_provider.rs

diff --git a/src/lib.rs b/src/lib.rs
index 38ed609a..d687f03f 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -2,6 +2,14 @@ pub mod block_range_scanner;
 pub mod event_filter;
 pub mod event_listener;
 pub mod event_scanner;
+pub mod safe_provider;
 pub mod types;
 
+pub use block_range_scanner::{
+    BlockRangeMessage, BlockRangeScanner, BlockRangeScannerClient, BlockRangeScannerError,
+    DEFAULT_BLOCK_CONFIRMATIONS, DEFAULT_BLOCKS_READ_PER_EPOCH,
+};
 pub use event_filter::EventFilter;
+pub use event_scanner::{EventScanner, EventScannerError, EventScannerMessage};
+pub use safe_provider::{SafeProvider, SafeProviderError};
+
diff --git a/src/safe_provider.rs b/src/safe_provider.rs
new file mode 100644
index 00000000..fce66a3f
--- /dev/null
+++ b/src/safe_provider.rs
@@ -0,0 +1,150 @@
+//! Safe provider wrapper with built-in retry and timeout mechanisms.
+//!
+//! This module provides a wrapper around Alloy providers that automatically
+//! handles retries, timeouts, and error logging for RPC calls.
+//!
+//! # Example
+//!
+//! ```rust,no_run
+//! use alloy::{
+//!     network::Ethereum,
+//!     providers::{RootProvider, WsConnect},
+//!     rpc::client::ClientBuilder,
+//! };
+//! use event_scanner::safe_provider::SafeProvider;
+//! use std::time::Duration;
+//!
+//! async fn example() -> Result<(), Box<dyn std::error::Error>> {
+//!     let provider = RootProvider::<Ethereum>::new(
+//!         ClientBuilder::default().ws(WsConnect::new("wss://localhost:8000")).await?,
+//!     );
+//!     let safe_provider =
+//!         SafeProvider::new(provider).with_timeout(Duration::from_secs(30)).with_max_retries(5);
+//!
+//!     let block = safe_provider.get_block_by_number(12345.into()).await?;
+//!     Ok(())
+//! }
+//! ```
+
+use std::{sync::Arc, time::Duration};
+
+use alloy::{
+    eips::BlockNumberOrTag,
+    network::Network,
+    providers::{Provider, RootProvider},
+    transports::{RpcError, TransportErrorKind},
+};
+use backon::{ExponentialBuilder, Retryable};
+use thiserror::Error;
+use tokio::time::timeout;
+use tracing::warn;
+
+const DEFAULT_TIMEOUT: Duration = Duration::from_secs(30);
+const DEFAULT_MAX_RETRIES: usize = 5;
+const DEFAULT_RETRY_INTERVAL: Duration = Duration::from_secs(1);
+
+#[derive(Error, Debug, Clone)]
+pub enum SafeProviderError {
+    #[error("RPC error: {0}")]
+    RpcError(Arc<RpcError<TransportErrorKind>>),
+
+    #[error("Request timeout after {0:?}")]
+    Timeout(Duration),
+
+    #[error("Block not found: {0}")]
+    BlockNotFound(BlockNumberOrTag),
+
+    #[error("All retry attempts exhausted")]
+    RetryExhausted,
+}
+
+impl From<RpcError<TransportErrorKind>> for SafeProviderError {
+    fn from(error: RpcError<TransportErrorKind>) -> Self {
+        SafeProviderError::RpcError(Arc::new(error))
+    }
+}
+
+#[derive(Clone)]
+pub struct SafeProvider<N: Network> {
+    provider: RootProvider<N>,
+    timeout: Duration,
+    max_retries: usize,
+    retry_interval: Duration,
+}
+
+impl<N: Network> SafeProvider<N> {
+    #[must_use]
+    pub fn new(provider: RootProvider<N>) -> Self {
+        Self {
+            provider,
+            timeout: DEFAULT_TIMEOUT,
+            max_retries: DEFAULT_MAX_RETRIES,
+            retry_interval: DEFAULT_RETRY_INTERVAL,
+        }
+    }
+
+    #[must_use]
+    pub fn with_timeout(mut self, timeout: Duration) -> Self {
+        self.timeout = timeout;
+        self
+    }
+
+    #[must_use]
+    pub fn with_max_retries(mut self, max_retries: usize) -> Self {
+        self.max_retries = max_retries;
+        self
+    }
+
+    #[must_use]
+    pub fn with_retry_interval(mut self, retry_interval: Duration) -> Self {
+        self.retry_interval = retry_interval;
+        self
+    }
+
+    #[must_use]
+    pub fn inner(&self) -> &RootProvider<N> {
+        &self.provider
+    }
+
+    #[allow(clippy::missing_errors_doc)]
+    pub async fn get_block_by_number(
+        &self,
+        number: BlockNumberOrTag,
+    ) -> Result<Option<N::BlockResponse>, SafeProviderError> {
+        let timeout_duration = self.timeout;
+        let provider = self.provider.clone();
+
+        let operation = || async {
+            let result = timeout(timeout_duration, provider.get_block_by_number(number)).await;
+
+            match result {
+                Ok(Ok(block)) => Ok(block),
+                Ok(Err(e)) => {
+                    warn!("RPC error fetching block {number}: {e}");
+                    Err(SafeProviderError::from(e))
+                }
+                Err(_) => {
+                    warn!("Timeout fetching block {number} after {timeout_duration:?}");
+                    Err(SafeProviderError::Timeout(timeout_duration))
+                }
+            }
+        };
+
+        let retry_strategy = ExponentialBuilder::default()
+            .with_max_times(self.max_retries)
+            .with_min_delay(self.retry_interval);
+
+        operation.retry(retry_strategy).sleep(tokio::time::sleep).await
+    }
+
+    // pub async fn get_block_number(&self) -> Result<u64, SafeProviderError> {
+    //     Ok(result)
+    // }
+    //
+    // pub async fn get_block_by_hash(
+    //     &self,
+    //     hash: alloy::primitives::BlockHash,
+    // ) -> Result<Option<N::BlockResponse>, SafeProviderError> {
+    //     Ok(result)
+    // }
+}
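The shape of the wrapper introduced here: every attempt is individually capped by `tokio::time::timeout`, and a timed-out attempt surfaces as a retryable error instead of hanging the caller. A reduced sketch of that composition, with the error type simplified to a `String` (not the crate's `SafeProviderError`):

```rust
use std::time::Duration;

use backon::{ExponentialBuilder, Retryable};
use tokio::time::timeout;

async fn with_per_attempt_timeout() -> Result<u64, String> {
    let per_attempt = Duration::from_secs(30);

    let operation = || async move {
        // Cap one attempt; `Elapsed` becomes an error that backon may retry.
        match timeout(per_attempt, async { Ok::<u64, String>(7) }).await {
            Ok(inner) => inner,
            Err(_) => Err(format!("attempt timed out after {per_attempt:?}")),
        }
    };

    operation
        .retry(ExponentialBuilder::default().with_max_times(5))
        .sleep(tokio::time::sleep)
        .await
}
```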
From 22c2448d673a363e4139ab943ae997a4407fd456 Mon Sep 17 00:00:00 2001
From: Leo
Date: Thu, 16 Oct 2025 21:43:16 +0900
Subject: [PATCH 003/122] feat: use internal

---
 src/safe_provider.rs | 59 ++++++++++++++++++++++++++++++++----------------------------
 1 file changed, 32 insertions(+), 27 deletions(-)

diff --git a/src/safe_provider.rs b/src/safe_provider.rs
index fce66a3f..f418f113 100644
--- a/src/safe_provider.rs
+++ b/src/safe_provider.rs
@@ -37,7 +37,6 @@ use alloy::{
 use backon::{ExponentialBuilder, Retryable};
 use thiserror::Error;
 use tokio::time::timeout;
-use tracing::warn;
 
 const DEFAULT_TIMEOUT: Duration = Duration::from_secs(30);
 const DEFAULT_MAX_RETRIES: usize = 5;
@@ -111,22 +110,38 @@ impl<N: Network> SafeProvider<N> {
         &self,
         number: BlockNumberOrTag,
     ) -> Result<Option<N::BlockResponse>, SafeProviderError> {
-        let timeout_duration = self.timeout;
         let provider = self.provider.clone();
+        self.retry_with_timeout(|| async { provider.get_block_by_number(number).await }).await
+    }
 
-        let operation = || async {
-            let result = timeout(timeout_duration, provider.get_block_by_number(number)).await;
-
-            match result {
-                Ok(Ok(block)) => Ok(block),
-                Ok(Err(e)) => {
-                    warn!("RPC error fetching block {number}: {e}");
-                    Err(SafeProviderError::from(e))
-                }
-                Err(_) => {
-                    warn!("Timeout fetching block {number} after {timeout_duration:?}");
-                    Err(SafeProviderError::Timeout(timeout_duration))
-                }
+    #[allow(clippy::missing_errors_doc)]
+    pub async fn get_block_number(&self) -> Result<u64, SafeProviderError> {
+        let provider = self.provider.clone();
+        self.retry_with_timeout(|| async { provider.get_block_number().await }).await
+    }
+
+    #[allow(clippy::missing_errors_doc)]
+    pub async fn get_block_by_hash(
+        &self,
+        hash: alloy::primitives::BlockHash,
+    ) -> Result<Option<N::BlockResponse>, SafeProviderError> {
+        let provider = self.provider.clone();
+        self.retry_with_timeout(|| async { provider.get_block_by_hash(hash).await }).await
+    }
+
+    #[allow(clippy::missing_errors_doc)]
+    async fn retry_with_timeout<T, F, Fut>(&self, operation: F) -> Result<T, SafeProviderError>
+    where
+        F: Fn() -> Fut,
+        Fut: Future<Output = Result<T, RpcError<TransportErrorKind>>>,
+    {
+        let timeout_duration = self.timeout;
+
+        let wrapped_operation = || async {
+            match timeout(timeout_duration, operation()).await {
+                Ok(Ok(result)) => Ok(result),
+                Ok(Err(e)) => Err(SafeProviderError::from(e)),
+                Err(_) => Err(SafeProviderError::Timeout(timeout_duration)),
             }
         };
 
@@ -134,17 +149,7 @@ impl<N: Network> SafeProvider<N> {
             .with_max_times(self.max_retries)
             .with_min_delay(self.retry_interval);
 
-        operation.retry(retry_strategy).sleep(tokio::time::sleep).await
+        wrapped_operation.retry(retry_strategy).sleep(tokio::time::sleep).await
     }
-
-    // pub async fn get_block_number(&self) -> Result<u64, SafeProviderError> {
-    //     Ok(result)
-    // }
-    //
-    // pub async fn get_block_by_hash(
-    //     &self,
-    //     hash: alloy::primitives::BlockHash,
-    // ) -> Result<Option<N::BlockResponse>, SafeProviderError> {
-    //     Ok(result)
-    // }
 }
+
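The key to the shared helper extracted in this patch is its bound: callers pass a closure that builds a fresh future for every attempt, since a future can only be polled to completion once. A toy illustration of why the signature is `F: Fn() -> Fut` (a hand-rolled loop standing in for backon; all names here are hypothetical):

```rust
use std::{future::Future, time::Duration};

async fn retry<T, E, F, Fut>(operation: F, attempts: usize) -> Result<T, E>
where
    F: Fn() -> Fut,
    Fut: Future<Output = Result<T, E>>,
{
    let mut tries = 0;
    loop {
        // Each iteration re-creates the future by calling the closure again.
        match operation().await {
            Ok(value) => return Ok(value),
            Err(e) if tries >= attempts => return Err(e),
            Err(_) => {
                tries += 1;
                tokio::time::sleep(Duration::from_millis(100)).await;
            }
        }
    }
}
```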
From d738ab291a97c88bccdedbc9010acddf143fd7a1 Mon Sep 17 00:00:00 2001
From: Leo
Date: Thu, 16 Oct 2025 21:56:16 +0900
Subject: [PATCH 004/122] feat: add get logs to safe provider

---
 src/safe_provider.rs | 11 +++++++++--
 1 file changed, 9 insertions(+), 2 deletions(-)

diff --git a/src/safe_provider.rs b/src/safe_provider.rs
index f418f113..bdbc215c 100644
--- a/src/safe_provider.rs
+++ b/src/safe_provider.rs
@@ -26,12 +26,13 @@
 //! }
 //! ```
 
-use std::{sync::Arc, time::Duration};
+use std::{future::Future, sync::Arc, time::Duration};
 
 use alloy::{
     eips::BlockNumberOrTag,
     network::Network,
     providers::{Provider, RootProvider},
+    rpc::types::{Filter, Log},
     transports::{RpcError, TransportErrorKind},
 };
 use backon::{ExponentialBuilder, Retryable};
 use thiserror::Error;
 use tokio::time::timeout;
@@ -129,6 +130,13 @@ impl<N: Network> SafeProvider<N> {
         self.retry_with_timeout(|| async { provider.get_block_by_hash(hash).await }).await
     }
 
+    #[allow(clippy::missing_errors_doc)]
+    pub async fn get_logs(&self, filter: &Filter) -> Result<Vec<Log>, SafeProviderError> {
+        let provider = self.provider.clone();
+        let filter = filter.clone();
+        self.retry_with_timeout(|| async { provider.get_logs(&filter).await }).await
+    }
+
     #[allow(clippy::missing_errors_doc)]
     async fn retry_with_timeout<T, F, Fut>(&self, operation: F) -> Result<T, SafeProviderError>
     where
         F: Fn() -> Fut,
@@ -152,4 +160,3 @@ impl<N: Network> SafeProvider<N> {
         wrapped_operation.retry(retry_strategy).sleep(tokio::time::sleep).await
     }
 }
-

From d068c8e589564ce8cc2819c5538983e9c2eee8b3 Mon Sep 17 00:00:00 2001
From: Leo
Date: Thu, 16 Oct 2025 22:06:32 +0900
Subject: [PATCH 005/122] chore: made var public

---
 src/safe_provider.rs | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/src/safe_provider.rs b/src/safe_provider.rs
index bdbc215c..1baf129e 100644
--- a/src/safe_provider.rs
+++ b/src/safe_provider.rs
@@ -39,9 +39,9 @@ use backon::{ExponentialBuilder, Retryable};
 use thiserror::Error;
 use tokio::time::timeout;
 
-const DEFAULT_TIMEOUT: Duration = Duration::from_secs(30);
-const DEFAULT_MAX_RETRIES: usize = 5;
-const DEFAULT_RETRY_INTERVAL: Duration = Duration::from_secs(1);
+pub const DEFAULT_TIMEOUT: Duration = Duration::from_secs(30);
+pub const DEFAULT_MAX_RETRIES: usize = 5;
+pub const DEFAULT_RETRY_INTERVAL: Duration = Duration::from_secs(1);
 
 #[derive(Error, Debug, Clone)]
 pub enum SafeProviderError {
     #[error("RPC error: {0}")]
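`get_logs` clones the filter before building the closure: the closure must be re-callable (`Fn`), so it owns one copy and lends `&filter` out on every attempt. The same ownership shape in miniature, using toy types rather than alloy's `Filter`:

```rust
use std::future::Future;

#[derive(Clone)]
struct Query(String);

async fn send(query: &Query) -> usize {
    query.0.len()
}

async fn run_twice<F, Fut>(op: F) -> usize
where
    F: Fn() -> Fut,
    Fut: Future<Output = usize>,
{
    // Two calls mimic a retry, which is why `FnOnce` would not suffice.
    op().await;
    op().await
}

async fn get_logs_shape(query: &Query) -> usize {
    // One clone up front; each attempt borrows the closure's own copy.
    let query = query.clone();
    run_twice(|| async { send(&query).await }).await
}
```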
From d1e12e5a8922d5ce7a4be21bcdf9b355dcc9c486 Mon Sep 17 00:00:00 2001
From: Leo
Date: Thu, 16 Oct 2025 22:06:54 +0900
Subject: [PATCH 006/122] feat: added safe provider configs to block range
 scanner

---
 src/block_range_scanner.rs | 89 ++++++++++++++++++++++++++++++--------
 1 file changed, 70 insertions(+), 19 deletions(-)

diff --git a/src/block_range_scanner.rs b/src/block_range_scanner.rs
index eab3ee6f..34216bbe 100644
--- a/src/block_range_scanner.rs
+++ b/src/block_range_scanner.rs
@@ -64,7 +64,7 @@
 //! }
 //! ```
 
-use std::{cmp::Ordering, ops::RangeInclusive, sync::Arc};
+use std::{cmp::Ordering, ops::RangeInclusive, sync::Arc, time::Duration};
 
 use tokio::{
     join,
     sync::{mpsc, oneshot},
 };
 use tokio_stream::{StreamExt, wrappers::ReceiverStream};
 
-use crate::types::{ScannerMessage, ScannerStatus};
+use crate::{
+    safe_provider::{
+        DEFAULT_MAX_RETRIES, DEFAULT_RETRY_INTERVAL, DEFAULT_TIMEOUT, SafeProvider,
+        SafeProviderError,
+    },
+    types::{ScannerMessage, ScannerStatus},
+};
 use alloy::{
     consensus::BlockHeader,
     eips::BlockNumberOrTag,
     network::{BlockResponse, Network, primitives::HeaderResponse},
     primitives::{B256, BlockNumber},
     providers::{Provider, RootProvider},
     pubsub::Subscription,
     rpc::client::ClientBuilder,
     transports::{
-        RpcError, TransportErrorKind, TransportResult,
+        TransportResult,
         http::reqwest::{self, Url},
         ws::WsConnect,
     },
@@ -101,6 +107,11 @@ pub const MAX_BUFFERED_MESSAGES: usize = 50000;
 // is considered final)
 pub const DEFAULT_REORG_REWIND_DEPTH: u64 = 64;
 
+// RPC retry and timeout settings
+pub const DEFAULT_RPC_TIMEOUT: Duration = Duration::from_secs(30);
+pub const DEFAULT_RPC_MAX_RETRIES: usize = 5;
+pub const DEFAULT_RPC_RETRY_INTERVAL: Duration = Duration::from_secs(1);
+
 // // State sync aware retry settings
 // const STATE_SYNC_RETRY_INTERVAL: Duration = Duration::from_secs(30);
 // const STATE_SYNC_MAX_RETRIES: u64 = 12;
@@ -138,8 +149,8 @@ pub enum BlockRangeScannerError {
     #[error("Serialization error: {0}")]
     SerializationError(Arc<serde_json::Error>),
 
-    #[error("RPC error: {0}")]
-    RpcError(Arc<RpcError<TransportErrorKind>>),
+    #[error("Safe provider error: {0}")]
+    SafeProviderError(Arc<SafeProviderError>),
 
     #[error("Channel send error")]
     ChannelError,
@@ -175,9 +186,9 @@ impl From<oneshot::error::RecvError> for BlockRangeScannerError {
     }
 }
 
-impl From<RpcError<TransportErrorKind>> for BlockRangeScannerError {
-    fn from(error: RpcError<TransportErrorKind>) -> Self {
-        BlockRangeScannerError::RpcError(Arc::new(error))
+impl From<SafeProviderError> for BlockRangeScannerError {
+    fn from(error: SafeProviderError) -> Self {
+        BlockRangeScannerError::SafeProviderError(Arc::new(error))
     }
 }
 
@@ -233,6 +244,9 @@ pub struct BlockRangeScanner {
     blocks_read_per_epoch: usize,
     max_reorg_depth: u64,
     block_confirmations: u64,
+    timeout: Duration,
+    max_retries: usize,
+    retry_interval: Duration,
 }
 
 impl Default for BlockRangeScanner {
@@ -248,6 +262,9 @@ impl BlockRangeScanner {
             blocks_read_per_epoch: DEFAULT_BLOCKS_READ_PER_EPOCH,
             max_reorg_depth: DEFAULT_REORG_REWIND_DEPTH,
             block_confirmations: DEFAULT_BLOCK_CONFIRMATIONS,
+            timeout: DEFAULT_TIMEOUT,
+            max_retries: DEFAULT_MAX_RETRIES,
+            retry_interval: DEFAULT_RETRY_INTERVAL,
         }
     }
 
@@ -269,6 +286,24 @@ impl BlockRangeScanner {
         self
     }
 
+    #[must_use]
+    pub fn with_timeout(mut self, rpc_timeout: Duration) -> Self {
+        self.timeout = rpc_timeout;
+        self
+    }
+
+    #[must_use]
+    pub fn with_max_retries(mut self, rpc_max_retries: usize) -> Self {
+        self.max_retries = rpc_max_retries;
+        self
+    }
+
+    #[must_use]
+    pub fn with_retry_interval(mut self, rpc_retry_interval: Duration) -> Self {
+        self.retry_interval = rpc_retry_interval;
+        self
+    }
+
     /// Connects to the provider via WebSocket
     ///
     /// # Errors
@@ -305,8 +340,13 @@ impl BlockRangeScanner {
         self,
         provider: RootProvider<N>,
     ) -> TransportResult<ConnectedBlockRangeScanner<N>> {
+        let safe_provider = SafeProvider::new(provider)
+            .with_timeout(self.timeout)
+            .with_max_retries(self.max_retries)
+            .with_retry_interval(self.retry_interval);
+
         Ok(ConnectedBlockRangeScanner {
-            provider,
+            provider: safe_provider,
             config: Config {
                 blocks_read_per_epoch: self.blocks_read_per_epoch,
                 reorg_rewind_depth: self.max_reorg_depth,
@@ -317,14 +357,14 @@
 }
 
 pub struct ConnectedBlockRangeScanner<N: Network> {
-    provider: RootProvider<N>,
+    provider: SafeProvider<N>,
    config: Config,
 }
 
 impl<N: Network> ConnectedBlockRangeScanner<N> {
-    /// Returns the underlying Provider.
+    /// Returns the `SafeProvider`
     #[must_use]
-    pub fn provider(&self) -> &RootProvider<N> {
+    pub fn provider(&self) -> &SafeProvider<N> {
         &self.provider
     }
 
@@ -344,7 +384,7 @@
 
 struct Service<N: Network> {
     config: Config,
-    provider: RootProvider<N>,
+    provider: SafeProvider<N>,
     subscriber: Option<mpsc::Sender<BlockRangeMessage>>,
     websocket_connected: bool,
     processed_count: u64,
@@ -354,7 +394,7 @@
 }
 
 impl<N: Network> Service<N> {
-    pub fn new(config: Config, provider: RootProvider<N>) -> (Self, mpsc::Sender<Command>) {
+    pub fn new(config: Config, provider: SafeProvider<N>) -> (Self, mpsc::Sender<Command>) {
         let (cmd_tx, cmd_rx) = mpsc::channel(100);
 
         let service = Self {
@@ -450,7 +490,13 @@
         let range_start = (latest + 1).saturating_sub(block_confirmations);
 
         tokio::spawn(async move {
-            Self::stream_live_blocks(range_start, provider, sender, block_confirmations).await;
+            Self::stream_live_blocks(
+                range_start,
+                provider.inner().clone(),
+                sender,
+                block_confirmations,
+            )
+            .await;
         });
 
         Ok(())
@@ -534,7 +580,7 @@
         let sender =
             self.subscriber.clone().ok_or_else(|| BlockRangeScannerError::ServiceShutdown)?;
 
-        let provider = self.provider.clone();
+        let provider = self.provider.inner().clone();
         tokio::spawn(async move {
             Self::stream_live_blocks(start_block_num, provider, sender, block_confirmations)
                 .await;
@@ -554,7 +600,7 @@
         let (live_block_buffer_sender, live_block_buffer_receiver) =
             mpsc::channel::<BlockRangeMessage>(MAX_BUFFERED_MESSAGES);
 
-        let provider = self.provider.clone();
+        let provider = self.provider.inner().clone();
 
         // The cutoff is the last block we have synced historically
         // Any block > cutoff will come from the live stream
@@ -1104,8 +1150,12 @@
         Config { blocks_read_per_epoch: 5, reorg_rewind_depth: 5, block_confirmations: 0 }
     }
 
-    fn mocked_provider(asserter: Asserter) -> RootProvider<Ethereum> {
-        RootProvider::new(RpcClient::mocked(asserter))
+    fn mocked_provider(asserter: Asserter) -> SafeProvider<Ethereum> {
+        let root_provider = RootProvider::new(RpcClient::mocked(asserter));
+        SafeProvider::new(root_provider)
+            .with_timeout(DEFAULT_RPC_TIMEOUT)
+            .with_max_retries(DEFAULT_RPC_MAX_RETRIES)
+            .with_retry_interval(DEFAULT_RPC_RETRY_INTERVAL)
     }
 
     #[test]
@@ -1970,3 +2020,4 @@
         Ok(())
     }
 }
+
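With this patch the scanner's builder forwards the RPC resilience knobs into the `SafeProvider` it constructs when connecting. A short usage sketch of the three builder methods added here (the chosen values are illustrative):

```rust
use std::time::Duration;

use event_scanner::BlockRangeScanner;

fn configured_scanner() -> BlockRangeScanner {
    // Each knob maps onto the SafeProvider built during connect:
    // per-call timeout, retry attempts, and base backoff delay.
    BlockRangeScanner::new()
        .with_timeout(Duration::from_secs(10))
        .with_max_retries(3)
        .with_retry_interval(Duration::from_millis(500))
}
```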
From 0e9aa3bb07e0465d223f8f95bc4f91ade5d33782 Mon Sep 17 00:00:00 2001
From: Leo
Date: Thu, 16 Oct 2025 22:30:14 +0900
Subject: [PATCH 007/122] feat: use safe provider in event scanner

---
 src/event_scanner.rs | 23 +++++++++++++++++++----
 1 file changed, 19 insertions(+), 4 deletions(-)

diff --git a/src/event_scanner.rs b/src/event_scanner.rs
index 3b5c063d..b3bef118 100644
--- a/src/event_scanner.rs
+++ b/src/event_scanner.rs
@@ -7,12 +7,13 @@ use crate::{
     },
     event_filter::EventFilter,
     event_listener::EventListener,
+    safe_provider::{SafeProvider, SafeProviderError},
     types::ScannerMessage,
 };
 use alloy::{
     eips::BlockNumberOrTag,
     network::Network,
-    providers::{Provider, RootProvider},
+    providers::RootProvider,
     rpc::types::{Filter, Log},
     sol_types::SolEvent,
     transports::{RpcError, TransportErrorKind, http::reqwest::Url},
@@ -80,6 +81,8 @@ pub enum EventScannerError {
     BlockRangeScanner(#[from] BlockRangeScannerError),
     #[error("Provider error: {0}")]
     Provider(Arc<RpcError<TransportErrorKind>>),
+    #[error("Safe provider error: {0}")]
+    SafeProvider(Arc<SafeProviderError>),
 }
 
 impl From<RpcError<TransportErrorKind>> for EventScannerError {
@@ -88,6 +91,12 @@ impl From<RpcError<TransportErrorKind>> for EventScannerError {
     }
 }
 
+impl From<SafeProviderError> for EventScannerError {
+    fn from(e: SafeProviderError) -> Self {
+        EventScannerError::SafeProvider(Arc::new(e))
+    }
+}
+
 impl From<RpcError<TransportErrorKind>> for EventScannerMessage {
     fn from(e: RpcError<TransportErrorKind>) -> Self {
         EventScannerMessage::Error(e.into())
@@ -100,6 +109,12 @@ impl From<BlockRangeScannerError> for EventScannerMessage {
     }
 }
 
+impl From<EventScannerError> for EventScannerMessage {
+    fn from(e: EventScannerError) -> Self {
+        EventScannerMessage::Error(e)
+    }
+}
+
 impl Default for EventScanner {
     fn default() -> Self {
         Self::new()
@@ -376,8 +391,8 @@ impl<N: Network> ConnectedEventScanner<N> {
         range: RangeInclusive<BlockNumber>,
         event_filter: &EventFilter,
         log_filter: &Filter,
-        provider: &RootProvider<N>,
-    ) -> Result<Vec<Log>, RpcError<TransportErrorKind>> {
+        provider: &SafeProvider<N>,
+    ) -> Result<Vec<Log>, EventScannerError> {
         let log_filter = log_filter.clone().from_block(*range.start()).to_block(*range.end());
 
         match provider.get_logs(&log_filter).await {
@@ -403,7 +418,7 @@ impl<N: Network> ConnectedEventScanner<N> {
                 "failed to get logs for block range"
             );
 
-            Err(e)
+            Err(e.into())
         }
     }
 }
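These `From` impls are what let a failed `get_logs` bubble up to subscribers: the provider error converts into the scanner error, which in turn converts into the message type. The chain in miniature, with toy error and message types in place of the crate's:

```rust
#[derive(Debug)]
struct RpcFailure(String);

#[derive(Debug)]
enum ScanError {
    Provider(RpcFailure),
}

#[derive(Debug)]
enum ScanMessage {
    Error(ScanError),
}

impl From<RpcFailure> for ScanError {
    fn from(e: RpcFailure) -> Self {
        ScanError::Provider(e)
    }
}

impl From<ScanError> for ScanMessage {
    fn from(e: ScanError) -> Self {
        ScanMessage::Error(e)
    }
}

fn fetch() -> Result<(), ScanError> {
    // `?` applies the first conversion automatically.
    Err(RpcFailure("connection reset".into()))?
}

fn notify() -> ScanMessage {
    // The second conversion turns the error into a subscriber message.
    fetch().unwrap_err().into()
}
```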
error: {0}")] Provider(Arc>), - #[error("Safe provider error: {0}")] - SafeProvider(Arc), + } impl From> for EventScannerError { @@ -91,11 +90,6 @@ impl From> for EventScannerError { } } -impl From for EventScannerError { - fn from(e: SafeProviderError) -> Self { - EventScannerError::SafeProvider(Arc::new(e)) - } -} impl From> for EventScannerMessage { fn from(e: RpcError) -> Self { diff --git a/src/lib.rs b/src/lib.rs index c2fd0070..4bd99c6b 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -13,4 +13,4 @@ pub use block_range_scanner::{ }; pub use event_filter::EventFilter; pub use event_scanner::{EventScanner, EventScannerError, EventScannerMessage}; -pub use safe_provider::{SafeProvider, SafeProviderError}; +pub use safe_provider::SafeProvider; diff --git a/src/safe_provider.rs b/src/safe_provider.rs index 1baf129e..46a29a3a 100644 --- a/src/safe_provider.rs +++ b/src/safe_provider.rs @@ -26,7 +26,7 @@ //! } //! ``` -use std::{future::Future, sync::Arc, time::Duration}; +use std::{future::Future, time::Duration}; use alloy::{ eips::BlockNumberOrTag, @@ -36,33 +36,14 @@ use alloy::{ transports::{RpcError, TransportErrorKind}, }; use backon::{ExponentialBuilder, Retryable}; -use thiserror::Error; -use tokio::time::timeout; + + pub const DEFAULT_TIMEOUT: Duration = Duration::from_secs(30); pub const DEFAULT_MAX_RETRIES: usize = 5; pub const DEFAULT_RETRY_INTERVAL: Duration = Duration::from_secs(1); -#[derive(Error, Debug, Clone)] -pub enum SafeProviderError { - #[error("RPC error: {0}")] - RpcError(Arc>), - - #[error("Request timeout after {0:?}")] - Timeout(Duration), - - #[error("Block not found: {0}")] - BlockNotFound(BlockNumberOrTag), - #[error("All retry attempts exhausted")] - RetryExhausted, -} - -impl From> for SafeProviderError { - fn from(error: RpcError) -> Self { - SafeProviderError::RpcError(Arc::new(error)) - } -} #[derive(Clone)] pub struct SafeProvider { @@ -110,13 +91,13 @@ impl SafeProvider { pub async fn get_block_by_number( &self, number: BlockNumberOrTag, - ) -> Result, SafeProviderError> { + ) -> Result, RpcError> { let provider = self.provider.clone(); self.retry_with_timeout(|| async { provider.get_block_by_number(number).await }).await } #[allow(clippy::missing_errors_doc)] - pub async fn get_block_number(&self) -> Result { + pub async fn get_block_number(&self) -> Result> { let provider = self.provider.clone(); self.retry_with_timeout(|| async { provider.get_block_number().await }).await } @@ -125,33 +106,25 @@ impl SafeProvider { pub async fn get_block_by_hash( &self, hash: alloy::primitives::BlockHash, - ) -> Result, SafeProviderError> { + ) -> Result, RpcError> { let provider = self.provider.clone(); self.retry_with_timeout(|| async { provider.get_block_by_hash(hash).await }).await } #[allow(clippy::missing_errors_doc)] - pub async fn get_logs(&self, filter: &Filter) -> Result, SafeProviderError> { + pub async fn get_logs(&self, filter: &Filter) -> Result, RpcError> { let provider = self.provider.clone(); let filter = filter.clone(); self.retry_with_timeout(|| async { provider.get_logs(&filter).await }).await } #[allow(clippy::missing_errors_doc)] - async fn retry_with_timeout(&self, operation: F) -> Result + async fn retry_with_timeout(&self, operation: F) -> Result> where F: Fn() -> Fut, Fut: Future>>, { - let timeout_duration = self.timeout; - - let wrapped_operation = || async { - match timeout(timeout_duration, operation()).await { - Ok(Ok(result)) => Ok(result), - Ok(Err(e)) => Err(SafeProviderError::from(e)), - Err(_) => 
From 65527b8232150788e262558285bb8fddb4c067b4 Mon Sep 17 00:00:00 2001
From: Leo
Date: Mon, 20 Oct 2025 21:46:50 +0900
Subject: [PATCH 009/122] feat: implement stream block remove import remove
 wrapped

---
 src/block_range_scanner.rs | 21 ++++++++------------
 src/safe_provider.rs       | 30 +++++++++++++++++-----------
 2 files changed, 27 insertions(+), 24 deletions(-)

diff --git a/src/block_range_scanner.rs b/src/block_range_scanner.rs
index d66555e4..f68f2536 100644
--- a/src/block_range_scanner.rs
+++ b/src/block_range_scanner.rs
@@ -81,7 +81,7 @@ use alloy::{
     eips::BlockNumberOrTag,
     network::{BlockResponse, Network, primitives::HeaderResponse},
     primitives::{B256, BlockNumber},
-    providers::{Provider, RootProvider},
+    providers::RootProvider,
     pubsub::Subscription,
     rpc::client::ClientBuilder,
     transports::{
@@ -487,13 +487,7 @@ impl<N: Network> Service<N> {
         let range_start = (latest + 1).saturating_sub(block_confirmations);
 
         tokio::spawn(async move {
-            Self::stream_live_blocks(
-                range_start,
-                provider.inner().clone(),
-                sender,
-                block_confirmations,
-            )
-            .await;
+            Self::stream_live_blocks(range_start, provider, sender, block_confirmations).await;
         });
 
         Ok(())
@@ -577,7 +571,7 @@ impl<N: Network> Service<N> {
         let sender =
             self.subscriber.clone().ok_or_else(|| BlockRangeScannerError::ServiceShutdown)?;
 
-        let provider = self.provider.inner().clone();
+        let provider = self.provider.clone();
         tokio::spawn(async move {
             Self::stream_live_blocks(start_block_num, provider, sender, block_confirmations)
                 .await;
@@ -597,7 +591,7 @@ impl<N: Network> Service<N> {
         let (live_block_buffer_sender, live_block_buffer_receiver) =
             mpsc::channel::<BlockRangeMessage>(MAX_BUFFERED_MESSAGES);
 
-        let provider = self.provider.inner().clone();
+        let provider = self.provider.clone();
 
         // The cutoff is the last block we have synced historically
         // Any block > cutoff will come from the live stream
@@ -792,9 +786,9 @@ impl<N: Network> Service<N> {
         Ok(())
     }
 
-    async fn stream_live_blocks<P: Provider<N>>(
+    async fn stream_live_blocks(
         mut range_start: BlockNumber,
-        provider: P,
+        provider: SafeProvider<N>,
         sender: mpsc::Sender<BlockRangeMessage>,
         block_confirmations: u64,
     ) {
@@ -900,7 +894,7 @@ impl<N: Network> Service<N> {
     }
 
     async fn get_block_subscription(
-        provider: &impl Provider<N>,
+        provider: &SafeProvider<N>,
     ) -> Result<Subscription<N::HeaderResponse>, BlockRangeScannerError> {
         let ws_stream = provider
             .subscribe_blocks()
@@ -1128,6 +1122,7 @@ impl BlockRangeScannerClient {
 
 #[cfg(test)]
 mod tests {
+    use alloy::providers::Provider;
     use std::time::Duration;
 
     use tokio::time::timeout;
diff --git a/src/safe_provider.rs b/src/safe_provider.rs
index 46a29a3a..d3bcbc5f 100644
--- a/src/safe_provider.rs
+++ b/src/safe_provider.rs
@@ -32,19 +32,16 @@ use alloy::{
     eips::BlockNumberOrTag,
     network::Network,
     providers::{Provider, RootProvider},
+    pubsub::Subscription,
     rpc::types::{Filter, Log},
     transports::{RpcError, TransportErrorKind},
 };
 use backon::{ExponentialBuilder, Retryable};
 
-
-
 pub const DEFAULT_TIMEOUT: Duration = Duration::from_secs(30);
 pub const DEFAULT_MAX_RETRIES: usize = 5;
 pub const DEFAULT_RETRY_INTERVAL: Duration = Duration::from_secs(1);
 
-
-
 #[derive(Clone)]
 pub struct SafeProvider<N: Network> {
     provider: RootProvider<N>,
@@ -112,24 +109,35 @@ impl<N: Network> SafeProvider<N> {
     }
 
     #[allow(clippy::missing_errors_doc)]
-    pub async fn get_logs(&self, filter: &Filter) -> Result<Vec<Log>, RpcError<TransportErrorKind>> {
+    pub async fn get_logs(
+        &self,
+        filter: &Filter,
+    ) -> Result<Vec<Log>, RpcError<TransportErrorKind>> {
         let provider = self.provider.clone();
-        let filter = filter.clone();
-        self.retry_with_timeout(|| async { provider.get_logs(&filter).await }).await
+        self.retry_with_timeout(|| async { provider.get_logs(filter).await }).await
+    }
+
+    #[allow(clippy::missing_errors_doc)]
+    pub async fn subscribe_blocks(
+        &self,
+    ) -> Result<Subscription<N::HeaderResponse>, RpcError<TransportErrorKind>> {
+        let provider = self.provider.clone();
+        self.retry_with_timeout(|| async { provider.subscribe_blocks().await }).await
     }
 
     #[allow(clippy::missing_errors_doc)]
-    async fn retry_with_timeout<T, F, Fut>(&self, operation: F) -> Result<T, RpcError<TransportErrorKind>>
+    async fn retry_with_timeout<T, F, Fut>(
+        &self,
+        operation: F,
+    ) -> Result<T, RpcError<TransportErrorKind>>
     where
         F: Fn() -> Fut,
         Fut: Future<Output = Result<T, RpcError<TransportErrorKind>>>,
     {
-        let wrapped_operation = || async { operation().await };
-
         let retry_strategy = ExponentialBuilder::default()
             .with_max_times(self.max_retries)
             .with_min_delay(self.retry_interval);
 
-        wrapped_operation.retry(retry_strategy).sleep(tokio::time::sleep).await
+        operation.retry(retry_strategy).sleep(tokio::time::sleep).await
     }
 }
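Note what is and is not retried here: `subscribe_blocks` retries *establishing* the subscription, but once a `Subscription` is handed back, reading from it is an ordinary stream receive. A sketch of the consuming side, assuming alloy's `Subscription::into_stream` (the surrounding function is hypothetical):

```rust
use alloy::{network::Network, pubsub::Subscription};
use tokio_stream::StreamExt;

async fn consume<N: Network>(subscription: Subscription<N::HeaderResponse>) {
    // Retries only ever apply to (re)establishing the subscription via the
    // SafeProvider; once the stream ends, a new subscription must be requested.
    let mut stream = subscription.into_stream();
    while let Some(header) = stream.next().await {
        let _ = header;
        // process the new block header...
    }
}
```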
From b5bf428de90df422877a87df5aefb8c3cf26c232 Mon Sep 17 00:00:00 2001
From: Leo
Date: Mon, 20 Oct 2025 22:46:53 +0900
Subject: [PATCH 010/122] test: add basic testing to safe provider

---
 src/safe_provider.rs | 100 ++++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 99 insertions(+), 1 deletion(-)

diff --git a/src/safe_provider.rs b/src/safe_provider.rs
index d3bcbc5f..d0f57863 100644
--- a/src/safe_provider.rs
+++ b/src/safe_provider.rs
@@ -126,7 +126,7 @@ impl<N: Network> SafeProvider<N> {
     }
 
     #[allow(clippy::missing_errors_doc)]
-    async fn retry_with_timeout<T, F, Fut>(
+    pub(crate) async fn retry_with_timeout<T, F, Fut>(
         &self,
         operation: F,
     ) -> Result<T, RpcError<TransportErrorKind>>
@@ -136,8 +136,106 @@ impl<N: Network> SafeProvider<N> {
     {
         let retry_strategy = ExponentialBuilder::default()
             .with_max_times(self.max_retries)
+            .with_total_delay(Some(self.timeout))
             .with_min_delay(self.retry_interval);
 
         operation.retry(retry_strategy).sleep(tokio::time::sleep).await
     }
 }
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use alloy::network::Ethereum;
+    use std::sync::{Arc, Mutex};
+
+    fn create_test_provider(
+        timeout: Duration,
+        max_retries: usize,
+        retry_interval: Duration,
+    ) -> SafeProvider<Ethereum> {
+        SafeProvider {
+            provider: RootProvider::<Ethereum>::new_http("http://localhost:8545".parse().unwrap()),
+            timeout,
+            max_retries,
+            retry_interval,
+        }
+    }
+
+    #[tokio::test]
+    async fn test_retry_with_timeout_succeeds_on_first_attempt() {
+        let provider =
+            create_test_provider(Duration::from_millis(100), 3, Duration::from_millis(10));
+
+        let call_count = Arc::new(Mutex::new(0));
+        let call_count_clone = call_count.clone();
+
+        let result = provider
+            .retry_with_timeout(move || {
+                let count = call_count_clone.clone();
+                async move {
+                    let mut c = count.lock().unwrap();
+                    *c += 1;
+                    Ok(42)
+                }
+            })
+            .await;
+
+        assert!(result.is_ok());
+        assert_eq!(result.unwrap(), 42);
+        assert_eq!(*call_count.lock().unwrap(), 1);
+    }
+
+    #[tokio::test]
+    async fn test_retry_with_timeout_retries_on_error() {
+        let provider =
+            create_test_provider(Duration::from_millis(100), 3, Duration::from_millis(10));
+
+        let call_count = Arc::new(Mutex::new(0));
+        let call_count_clone = call_count.clone();
+
+        let result = provider
+            .retry_with_timeout(move || {
+                let count = call_count_clone.clone();
+                async move {
+                    let mut c = count.lock().unwrap();
+                    *c += 1;
+                    if *c < 3 {
+                        Err(TransportErrorKind::custom_str("temporary error"))
+                    } else {
+                        Ok(42)
+                    }
+                }
+            })
+            .await;
+
+        assert!(result.is_ok());
+        assert_eq!(result.unwrap(), 42);
+        assert_eq!(*call_count.lock().unwrap(), 3);
+    }
+
+    #[tokio::test]
+    async fn test_retry_with_timeout_fails_after_max_retries() {
+        let provider =
+            create_test_provider(Duration::from_millis(100), 2, Duration::from_millis(10));
+
+        let call_count = Arc::new(Mutex::new(0));
+        let call_count_clone = call_count.clone();
+
+        let result = provider
+            .retry_with_timeout(move || {
+                let count = call_count_clone.clone();
+                async move {
+                    let mut c = count.lock().unwrap();
+                    *c += 1;
+                    Err::<i32, RpcError<TransportErrorKind>>(TransportErrorKind::custom_str(
+                        "permanent error",
+                    ))
+                }
+            })
+            .await;
+
+        assert!(result.is_err());
+        assert_eq!(*call_count.lock().unwrap(), 3);
+    }
+}
From 6241e8375390bfe4ab8d7d348312b3fe558c1d23 Mon Sep 17 00:00:00 2001
From: Leo
Date: Mon, 20 Oct 2025 22:48:44 +0900
Subject: [PATCH 011/122] chore: delete other constants

---
 src/block_range_scanner.rs | 4 ----
 1 file changed, 4 deletions(-)

diff --git a/src/block_range_scanner.rs b/src/block_range_scanner.rs
index f68f2536..9c4589f3 100644
--- a/src/block_range_scanner.rs
+++ b/src/block_range_scanner.rs
@@ -109,10 +109,6 @@ pub const DEFAULT_RPC_TIMEOUT: Duration = Duration::from_secs(30);
 pub const DEFAULT_RPC_MAX_RETRIES: usize = 5;
 pub const DEFAULT_RPC_RETRY_INTERVAL: Duration = Duration::from_secs(1);
 
-// // State sync aware retry settings
-// const STATE_SYNC_RETRY_INTERVAL: Duration = Duration::from_secs(30);
-// const STATE_SYNC_MAX_RETRIES: u64 = 12;
-
 pub type BlockRangeMessage = ScannerMessage<RangeInclusive<BlockNumber>, BlockRangeScannerError>;
 
 impl From<ScannerMessage<Vec<BlockNumber>, BlockRangeScannerError>> for BlockRangeMessage {

From 79eaaaef91e21b0d3b9e8e62dce575a99b458b81 Mon Sep 17 00:00:00 2001
From: Leo
Date: Mon, 20 Oct 2025 22:51:02 +0900
Subject: [PATCH 012/122] fix: fmt

---
 src/event_scanner.rs | 2 --
 1 file changed, 2 deletions(-)

diff --git a/src/event_scanner.rs b/src/event_scanner.rs
index 9ae703ca..464c518a 100644
--- a/src/event_scanner.rs
+++ b/src/event_scanner.rs
@@ -81,7 +81,6 @@ pub enum EventScannerError {
     BlockRangeScanner(#[from] BlockRangeScannerError),
     #[error("Provider error: {0}")]
     Provider(Arc<RpcError<TransportErrorKind>>),
-
 }
 
 impl From<RpcError<TransportErrorKind>> for EventScannerError {
@@ -90,7 +89,6 @@ impl From<RpcError<TransportErrorKind>> for EventScannerError {
     }
 }
 
-
 impl From<RpcError<TransportErrorKind>> for EventScannerMessage {
     fn from(e: RpcError<TransportErrorKind>) -> Self {
         EventScannerMessage::Error(e.into())
From 112c8600ee78218e34db9f73ca15eefe6be3c3bb Mon Sep 17 00:00:00 2001
From: Leo
Date: Tue, 21 Oct 2025 21:56:30 +0900
Subject: [PATCH 013/122] feat: add logging to rpc calls

---
 src/safe_provider.rs | 38 +++++++++++++++++++++++++++++++++-----
 1 file changed, 33 insertions(+), 5 deletions(-)

diff --git a/src/safe_provider.rs b/src/safe_provider.rs
index d0f57863..4fde8150 100644
--- a/src/safe_provider.rs
+++ b/src/safe_provider.rs
@@ -37,6 +37,7 @@ use alloy::{
     transports::{RpcError, TransportErrorKind},
 };
 use backon::{ExponentialBuilder, Retryable};
+use tracing::{debug, error};
 
 pub const DEFAULT_TIMEOUT: Duration = Duration::from_secs(30);
 pub const DEFAULT_MAX_RETRIES: usize = 5;
@@ -90,14 +91,25 @@ impl<N: Network> SafeProvider<N> {
         &self,
         number: BlockNumberOrTag,
     ) -> Result<Option<N::BlockResponse>, RpcError<TransportErrorKind>> {
+        debug!("SafeProvider eth_getBlockByNumber called with number: {:?}", number);
         let provider = self.provider.clone();
-        self.retry_with_timeout(|| async { provider.get_block_by_number(number).await }).await
+        let result =
+            self.retry_with_timeout(|| async { provider.get_block_by_number(number).await }).await;
+        if let Err(e) = &result {
+            error!("SafeProvider eth_getByBlockNumber failed: {}", e);
+        }
+        result
     }
 
     #[allow(clippy::missing_errors_doc)]
     pub async fn get_block_number(&self) -> Result<u64, RpcError<TransportErrorKind>> {
+        debug!("SafeProvider eth_getBlockNumber called");
         let provider = self.provider.clone();
-        self.retry_with_timeout(|| async { provider.get_block_number().await }).await
+        let result = self.retry_with_timeout(|| async { provider.get_block_number().await }).await;
+        if let Err(e) = &result {
+            error!("SafeProvider eth_getBlockNumber failed: {}", e);
+        }
+        result
     }
 
     #[allow(clippy::missing_errors_doc)]
@@ -105,8 +117,14 @@ impl<N: Network> SafeProvider<N> {
         &self,
         hash: alloy::primitives::BlockHash,
     ) -> Result<Option<N::BlockResponse>, RpcError<TransportErrorKind>> {
+        debug!("SafeProvider eth_getBlockByHash called with hash: {:?}", hash);
         let provider = self.provider.clone();
-        self.retry_with_timeout(|| async { provider.get_block_by_hash(hash).await }).await
+        let result =
+            self.retry_with_timeout(|| async { provider.get_block_by_hash(hash).await }).await;
+        if let Err(e) = &result {
+            error!("SafeProvider eth_getBlockByHash failed: {}", e);
+        }
+        result
     }
 
     #[allow(clippy::missing_errors_doc)]
@@ -114,16 +132,26 @@ impl<N: Network> SafeProvider<N> {
         &self,
         filter: &Filter,
     ) -> Result<Vec<Log>, RpcError<TransportErrorKind>> {
+        debug!("eth_getLogs called with filter: {:?}", filter);
         let provider = self.provider.clone();
-        self.retry_with_timeout(|| async { provider.get_logs(filter).await }).await
+        let result = self.retry_with_timeout(|| async { provider.get_logs(filter).await }).await;
+        if let Err(e) = &result {
+            error!("SafeProvider eth_getLogs failed: {}", e);
+        }
+        result
     }
 
     #[allow(clippy::missing_errors_doc)]
     pub async fn subscribe_blocks(
         &self,
     ) -> Result<Subscription<N::HeaderResponse>, RpcError<TransportErrorKind>> {
+        debug!("eth_subscribe called");
         let provider = self.provider.clone();
-        self.retry_with_timeout(|| async { provider.subscribe_blocks().await }).await
+        let result = self.retry_with_timeout(|| async { provider.subscribe_blocks().await }).await;
+        if let Err(e) = &result {
+            error!("SafeProvider eth_subscribe failed: {}", e);
+        }
+        result
     }
 
     #[allow(clippy::missing_errors_doc)]
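The pattern applied uniformly here — announce the call at debug level, run the guarded operation, log a failure at error level once before propagating it — in isolation (a generic sketch, not tied to the provider types):

```rust
use std::future::Future;

use tracing::{debug, error};

async fn traced<T, E: std::fmt::Display, Fut>(name: &str, fut: Fut) -> Result<T, E>
where
    Fut: Future<Output = Result<T, E>>,
{
    debug!("{name} called");
    let result = fut.await;
    if let Err(e) = &result {
        // Failures are logged exactly once, here, and still returned to the caller.
        error!("{name} failed: {e}");
    }
    result
}
```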
From 855e16746d177a164c3bc5c531966dd646c09bcf Mon Sep 17 00:00:00 2001
From: Leo
Date: Tue, 21 Oct 2025 22:42:08 +0900
Subject: [PATCH 014/122] chore: add comments and rename timeout

---
 src/block_range_scanner.rs | 25 ++++++++---------
 src/safe_provider.rs       | 56 +++++++++++++++++++++++++++++---------
 2 files changed, 54 insertions(+), 27 deletions(-)

diff --git a/src/block_range_scanner.rs b/src/block_range_scanner.rs
index 9c4589f3..50368d66 100644
--- a/src/block_range_scanner.rs
+++ b/src/block_range_scanner.rs
@@ -73,7 +73,9 @@ use tokio_stream::{StreamExt, wrappers::ReceiverStream};
 
 use crate::{
-    safe_provider::{DEFAULT_MAX_RETRIES, DEFAULT_RETRY_INTERVAL, DEFAULT_TIMEOUT, SafeProvider},
+    safe_provider::{
+        DEFAULT_MAX_RETRIES, DEFAULT_MAX_TIMEOUT, DEFAULT_RETRY_INTERVAL, SafeProvider,
+    },
     types::{ScannerMessage, ScannerStatus},
 };
 use alloy::{
@@ -104,11 +106,6 @@ pub const MAX_BUFFERED_MESSAGES: usize = 50000;
 // is considered final)
 pub const DEFAULT_REORG_REWIND_DEPTH: u64 = 64;
 
-// RPC retry and timeout settings
-pub const DEFAULT_RPC_TIMEOUT: Duration = Duration::from_secs(30);
-pub const DEFAULT_RPC_MAX_RETRIES: usize = 5;
-pub const DEFAULT_RPC_RETRY_INTERVAL: Duration = Duration::from_secs(1);
-
 pub type BlockRangeMessage = ScannerMessage<RangeInclusive<BlockNumber>, BlockRangeScannerError>;
 
@@ -237,7 +234,7 @@ pub struct BlockRangeScanner {
     blocks_read_per_epoch: usize,
     max_reorg_depth: u64,
     block_confirmations: u64,
-    timeout: Duration,
+    max_timeout: Duration,
     max_retries: usize,
     retry_interval: Duration,
 }
@@ -252,7 +252,7 @@ impl BlockRangeScanner {
             blocks_read_per_epoch: DEFAULT_BLOCKS_READ_PER_EPOCH,
             max_reorg_depth: DEFAULT_REORG_REWIND_DEPTH,
             block_confirmations: DEFAULT_BLOCK_CONFIRMATIONS,
-            timeout: DEFAULT_TIMEOUT,
+            max_timeout: DEFAULT_MAX_TIMEOUT,
             max_retries: DEFAULT_MAX_RETRIES,
             retry_interval: DEFAULT_RETRY_INTERVAL,
         }
@@ -280,8 +277,8 @@ impl BlockRangeScanner {
     }
 
     #[must_use]
-    pub fn with_timeout(mut self, rpc_timeout: Duration) -> Self {
-        self.timeout = rpc_timeout;
+    pub fn with_max_timeout(mut self, rpc_timeout: Duration) -> Self {
+        self.max_timeout = rpc_timeout;
         self
     }
 
@@ -334,7 +331,7 @@ impl BlockRangeScanner {
         provider: RootProvider<N>,
     ) -> TransportResult<ConnectedBlockRangeScanner<N>> {
         let safe_provider = SafeProvider::new(provider)
-            .with_timeout(self.timeout)
+            .with_max_timeout(self.max_timeout)
             .with_max_retries(self.max_retries)
             .with_retry_interval(self.retry_interval);
 
@@ -1141,9 +1138,9 @@ mod tests {
     fn mocked_provider(asserter: Asserter) -> SafeProvider<Ethereum> {
         let root_provider = RootProvider::new(RpcClient::mocked(asserter));
         SafeProvider::new(root_provider)
-            .with_timeout(DEFAULT_RPC_TIMEOUT)
-            .with_max_retries(DEFAULT_RPC_MAX_RETRIES)
-            .with_retry_interval(DEFAULT_RPC_RETRY_INTERVAL)
+            .with_max_timeout(DEFAULT_MAX_TIMEOUT)
+            .with_max_retries(DEFAULT_MAX_RETRIES)
+            .with_retry_interval(DEFAULT_RETRY_INTERVAL)
     }
 
     #[test]
diff --git a/src/safe_provider.rs b/src/safe_provider.rs
index 4fde8150..d7b32f27 100644
--- a/src/safe_provider.rs
+++ b/src/safe_provider.rs
@@ -39,32 +39,38 @@ use alloy::{
 use backon::{ExponentialBuilder, Retryable};
 use tracing::{debug, error};
 
-pub const DEFAULT_TIMEOUT: Duration = Duration::from_secs(30);
+// RPC retry and timeout settings
+/// Default timeout used by `SafeProvider`
+pub const DEFAULT_MAX_TIMEOUT: Duration = Duration::from_secs(30);
+/// Default maximum number of retry attempts.
 pub const DEFAULT_MAX_RETRIES: usize = 5;
+/// Default base delay between retries.
 pub const DEFAULT_RETRY_INTERVAL: Duration = Duration::from_secs(1);
 
+/// Provider wrapper adding retries and timeouts.
 #[derive(Clone)]
 pub struct SafeProvider<N: Network> {
     provider: RootProvider<N>,
-    timeout: Duration,
+    max_timeout: Duration,
     max_retries: usize,
     retry_interval: Duration,
 }
 
 impl<N: Network> SafeProvider<N> {
+    /// Create a new `SafeProvider` with default settings.
     #[must_use]
     pub fn new(provider: RootProvider<N>) -> Self {
         Self {
             provider,
-            timeout: DEFAULT_TIMEOUT,
+            max_timeout: DEFAULT_MAX_TIMEOUT,
             max_retries: DEFAULT_MAX_RETRIES,
             retry_interval: DEFAULT_RETRY_INTERVAL,
         }
     }
 
     #[must_use]
-    pub fn with_timeout(mut self, timeout: Duration) -> Self {
-        self.timeout = timeout;
+    pub fn with_max_timeout(mut self, timeout: Duration) -> Self {
+        self.max_timeout = timeout;
         self
     }
 
@@ -85,7 +91,11 @@ impl<N: Network> SafeProvider<N> {
         &self.provider
     }
 
-    #[allow(clippy::missing_errors_doc)]
+    /// Fetch a block by number with retry and timeout.
+    ///
+    /// # Errors
+    /// Returns `RpcError` if the RPC call fails
+    /// after exhausting retries or times out.
     pub async fn get_block_by_number(
         &self,
         number: BlockNumberOrTag,
@@ -110,7 +120,11 @@ impl<N: Network> SafeProvider<N> {
         result
     }
 
-    #[allow(clippy::missing_errors_doc)]
+    /// Fetch the latest block number with retry and timeout.
+    ///
+    /// # Errors
+    /// Returns `RpcError` if the RPC call fails
+    /// after exhausting retries or times out.
     pub async fn get_block_number(&self) -> Result<u64, RpcError<TransportErrorKind>> {
         debug!("SafeProvider eth_getBlockNumber called");
         let provider = self.provider.clone();
@@ -125,7 +139,11 @@ impl<N: Network> SafeProvider<N> {
         result
     }
 
-    #[allow(clippy::missing_errors_doc)]
+    /// Fetch a block by hash with retry and timeout.
+    ///
+    /// # Errors
+    /// Returns `RpcError` if the RPC call fails
+    /// after exhausting retries or times out.
     pub async fn get_block_by_hash(
         &self,
         hash: alloy::primitives::BlockHash,
@@ -144,7 +162,11 @@ impl<N: Network> SafeProvider<N> {
         result
     }
 
-    #[allow(clippy::missing_errors_doc)]
+    /// Fetch logs for the given filter with retry and timeout.
+    ///
+    /// # Errors
+    /// Returns `RpcError` if the RPC call fails
+    /// after exhausting retries or times out.
     pub async fn get_logs(
         &self,
         filter: &Filter,
@@ -156,7 +178,11 @@ impl<N: Network> SafeProvider<N> {
         result
     }
 
-    #[allow(clippy::missing_errors_doc)]
+    /// Subscribe to new block headers with retry and timeout.
+    ///
+    /// # Errors
+    /// Returns `RpcError` if the subscription
+    /// cannot be established after retries or times out.
     pub async fn subscribe_blocks(
         &self,
     ) -> Result<Subscription<N::HeaderResponse>, RpcError<TransportErrorKind>> {
@@ -173,7 +199,11 @@ impl<N: Network> SafeProvider<N> {
         result
     }
 
-    #[allow(clippy::missing_errors_doc)]
+    /// Execute `operation` with exponential backoff and a total timeout.
+    ///
+    /// # Errors
+    /// Returns `RpcError` if all attempts fail or the
+    /// total delay exceeds the configured timeout.
     pub(crate) async fn retry_with_timeout<T, F, Fut>(
         &self,
         operation: F,
@@ -184,7 +214,7 @@ impl<N: Network> SafeProvider<N> {
     {
         let retry_strategy = ExponentialBuilder::default()
             .with_max_times(self.max_retries)
-            .with_total_delay(Some(self.timeout))
+            .with_total_delay(Some(self.max_timeout))
             .with_min_delay(self.retry_interval);
 
         operation.retry(retry_strategy).sleep(tokio::time::sleep).await
@@ -194,7 +224,7 @@ mod tests {
         SafeProvider {
             provider: RootProvider::<Ethereum>::new_http("http://localhost:8545".parse().unwrap()),
-            timeout,
+            max_timeout: timeout,
             max_retries,
             retry_interval,
         }

From 8bd13b1f49a2973e3158d2f0e42a4545bfeb752c Mon Sep 17 00:00:00 2001
From: Leo
Date: Tue, 21 Oct 2025 22:45:09 +0900
Subject: [PATCH 015/122] chore: doctest

---
 src/safe_provider.rs | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/src/safe_provider.rs b/src/safe_provider.rs
index b4deaf22..d7b32f27 100644
--- a/src/safe_provider.rs
+++ b/src/safe_provider.rs
@@ -18,8 +18,9 @@
 //!     let provider = RootProvider::<Ethereum>::new(
 //!         ClientBuilder::default().ws(WsConnect::new("wss://localhost:8000")).await?,
 //!     );
-//!     let safe_provider =
-//!         SafeProvider::new(provider).with_timeout(Duration::from_secs(30)).with_max_retries(5);
+//!     let safe_provider = SafeProvider::new(provider)
+//!         .with_max_timeout(Duration::from_secs(30))
+//!         .with_max_retries(5);
 //!
 //!     let block = safe_provider.get_block_by_number(12345.into()).await?;
 //!
 //!     Ok(())

From af7ce0159fdd15f3b4dffae876f04c77bbbf490d Mon Sep 17 00:00:00 2001
From: Leo
Date: Tue, 21 Oct 2025 23:09:58 +0900
Subject: [PATCH 016/122] feat: add total timeout

---
 src/safe_provider.rs | 66 ++++++++++++++++++++++++++++++++++----------
 1 file changed, 55 insertions(+), 11 deletions(-)

diff --git a/src/safe_provider.rs b/src/safe_provider.rs
index d7b32f27..bbd8648a 100644
--- a/src/safe_provider.rs
+++ b/src/safe_provider.rs
@@ -103,8 +103,9 @@ impl<N: Network> SafeProvider<N> {
     ) -> Result<Option<N::BlockResponse>, RpcError<TransportErrorKind>> {
         debug!("SafeProvider eth_getBlockByNumber called with number: {:?}", number);
         let provider = self.provider.clone();
-        let result =
-            self.retry_with_timeout(|| async { provider.get_block_by_number(number).await }).await;
+        let result = self
+            .retry_with_total_timeout(|| async { provider.get_block_by_number(number).await })
+            .await;
         if let Err(e) = &result {
             error!("SafeProvider eth_getByBlockNumber failed: {}", e);
         }
@@ -119,7 +120,8 @@ impl<N: Network> SafeProvider<N> {
     pub async fn get_block_number(&self) -> Result<u64, RpcError<TransportErrorKind>> {
         debug!("SafeProvider eth_getBlockNumber called");
         let provider = self.provider.clone();
-        let result = self.retry_with_timeout(|| async { provider.get_block_number().await }).await;
+        let result =
+            self.retry_with_total_timeout(|| async { provider.get_block_number().await }).await;
         if let Err(e) = &result {
             error!("SafeProvider eth_getBlockNumber failed: {}", e);
         }
@@ -137,8 +139,9 @@ impl<N: Network> SafeProvider<N> {
     ) -> Result<Option<N::BlockResponse>, RpcError<TransportErrorKind>> {
         debug!("SafeProvider eth_getBlockByHash called with hash: {:?}", hash);
         let provider = self.provider.clone();
-        let result =
-            self.retry_with_timeout(|| async { provider.get_block_by_hash(hash).await }).await;
+        let result = self
+            .retry_with_total_timeout(|| async { provider.get_block_by_hash(hash).await })
+            .await;
         if let Err(e) = &result {
             error!("SafeProvider eth_getBlockByHash failed: {}", e);
         }
@@ -156,7 +159,8 @@ impl<N: Network> SafeProvider<N> {
     ) -> Result<Vec<Log>, RpcError<TransportErrorKind>> {
         debug!("eth_getLogs called with filter: {:?}", filter);
         let provider = self.provider.clone();
-        let result = self.retry_with_timeout(|| async { provider.get_logs(filter).await }).await;
+        let result =
+            self.retry_with_total_timeout(|| async { provider.get_logs(filter).await }).await;
         if let Err(e) = &result {
             error!("SafeProvider eth_getLogs failed: {}", e);
         }
@@ -173,19 +177,20 @@ impl<N: Network> SafeProvider<N> {
     ) -> Result<Subscription<N::HeaderResponse>, RpcError<TransportErrorKind>> {
         debug!("eth_subscribe called");
         let provider = self.provider.clone();
-        let result = self.retry_with_timeout(|| async { provider.subscribe_blocks().await }).await;
+        let result =
+            self.retry_with_total_timeout(|| async { provider.subscribe_blocks().await }).await;
         if let Err(e) = &result {
             error!("SafeProvider eth_subscribe failed: {}", e);
         }
         result
     }
 
-    /// Execute `operation` with exponential backoff and a total timeout.
+    /// Execute `operation` with exponential backoff respecting only the backoff budget.
     ///
     /// # Errors
     /// Returns `RpcError` if all attempts fail or the
-    /// total delay exceeds the configured timeout.
-    pub(crate) async fn retry_with_timeout<T, F, Fut>(
+    /// cumulative backoff delay exceeds the configured budget.
+    async fn retry_with_timeout<T, F, Fut>(
         &self,
         operation: F,
     ) -> Result<T, RpcError<TransportErrorKind>>
@@ -195,11 +200,34 @@ impl<N: Network> SafeProvider<N> {
     {
         let retry_strategy = ExponentialBuilder::default()
             .with_max_times(self.max_retries)
-            .with_total_delay(Some(self.max_timeout))
             .with_min_delay(self.retry_interval);
 
         operation.retry(retry_strategy).sleep(tokio::time::sleep).await
     }
+
+    /// Execute `operation` with exponential backoff and a true total timeout.
+    ///
+    /// Wraps the retry logic with `tokio::time::timeout(self.max_timeout, ...)` so
+    /// the entire operation (including time spent inside the RPC call) cannot exceed
+    /// `max_timeout`.
+    ///
+    /// # Errors
+    /// - Returns `RpcError` with message "total operation timeout exceeded" if
+    ///   the overall timeout elapses.
+    /// - Propagates any `RpcError` from the underlying retries.
+    async fn retry_with_total_timeout<T, F, Fut>(
+        &self,
+        operation: F,
+    ) -> Result<T, RpcError<TransportErrorKind>>
+    where
+        F: Fn() -> Fut,
+        Fut: Future<Output = Result<T, RpcError<TransportErrorKind>>>,
+    {
+        match tokio::time::timeout(self.max_timeout, self.retry_with_timeout(operation)).await {
+            Ok(res) => res,
+            Err(_) => Err(TransportErrorKind::custom_str("total operation timeout exceeded")),
+        }
+    }
 }
 
 #[cfg(test)]
@@ -207,6 +235,7 @@ mod tests {
     use super::*;
     use alloy::network::Ethereum;
     use std::sync::{Arc, Mutex};
+    use tokio::time::sleep;
 
     fn create_test_provider(
         timeout: Duration,
@@ -297,4 +326,19 @@ mod tests {
         assert!(result.is_err());
         assert_eq!(*call_count.lock().unwrap(), 3);
     }
+
+    #[tokio::test]
+    async fn test_retry_with_timeout_respects_total_delay() {
+        let max_timeout = Duration::from_millis(50);
+        let provider = create_test_provider(max_timeout, 10, Duration::from_millis(1));
+
+        let result = provider
+            .retry_with_total_timeout(move || async move {
+                sleep(max_timeout + Duration::from_millis(10)).await;
+                Ok(42)
+            })
+            .await;
+
+        assert!(result.is_err());
+    }
 }
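After this patch there are two clocks in play: the backoff schedule between attempts, and one overall `max_timeout` wrapped around the whole retry loop. A reduced sketch of the layering, with string errors standing in for the transport error:

```rust
use std::time::Duration;

use backon::{ExponentialBuilder, Retryable};

async fn attempt() -> Result<u64, String> {
    Err("transient failure".to_string())
}

async fn with_total_budget() -> Result<u64, String> {
    let total = Duration::from_secs(30);

    let retried = attempt
        .retry(ExponentialBuilder::default().with_max_times(5))
        .sleep(tokio::time::sleep);

    // The budget covers time spent inside attempts as well as the
    // backoff sleeps between them.
    match tokio::time::timeout(total, retried).await {
        Ok(result) => result,
        Err(_) => Err("total operation timeout exceeded".to_string()),
    }
}
```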
From 471f767795d50f7db47a5d3830f979562c5bb2ba Mon Sep 17 00:00:00 2001
From: Leo
Date: Thu, 23 Oct 2025 20:15:25 +0900
Subject: [PATCH 017/122] ref: improving tracing message

---
 src/safe_provider.rs | 27 +++++++++++----------------
 1 file changed, 11 insertions(+), 16 deletions(-)

diff --git a/src/safe_provider.rs b/src/safe_provider.rs
index bbd8648a..9cc72e49 100644
--- a/src/safe_provider.rs
+++ b/src/safe_provider.rs
@@ -38,7 +38,7 @@ use alloy::{
     transports::{RpcError, TransportErrorKind},
 };
 use backon::{ExponentialBuilder, Retryable};
-use tracing::{debug, error};
+use tracing::{error, info};
 
 // RPC retry and timeout settings
 /// Default timeout used by `SafeProvider`
@@ -87,11 +87,6 @@ impl<N: Network> SafeProvider<N> {
         self
     }
 
-    #[must_use]
-    pub fn inner(&self) -> &RootProvider<N> {
-        &self.provider
-    }
-
     /// Fetch a block by number with retry and timeout.
     ///
     /// # Errors
@@ -101,13 +96,13 @@ impl<N: Network> SafeProvider<N> {
         &self,
         number: BlockNumberOrTag,
     ) -> Result<Option<N::BlockResponse>, RpcError<TransportErrorKind>> {
-        debug!("SafeProvider eth_getBlockByNumber called with number: {:?}", number);
+        info!("eth_getBlockByNumber called");
         let provider = self.provider.clone();
         let result = self
             .retry_with_total_timeout(|| async { provider.get_block_by_number(number).await })
             .await;
         if let Err(e) = &result {
-            error!("SafeProvider eth_getByBlockNumber failed: {}", e);
+            error!("eth_getByBlockNumber failed: {}", e);
         }
         result
     }
@@ -118,12 +113,12 @@ impl<N: Network> SafeProvider<N> {
     /// Returns `RpcError` if the RPC call fails
     /// after exhausting retries or times out.
     pub async fn get_block_number(&self) -> Result<u64, RpcError<TransportErrorKind>> {
-        debug!("SafeProvider eth_getBlockNumber called");
+        info!("eth_getBlockNumber called");
         let provider = self.provider.clone();
         let result =
             self.retry_with_total_timeout(|| async { provider.get_block_number().await }).await;
         if let Err(e) = &result {
-            error!("SafeProvider eth_getBlockNumber failed: {}", e);
+            error!("eth_getBlockNumber failed: {}", e);
         }
         result
     }
@@ -137,13 +132,13 @@ impl<N: Network> SafeProvider<N> {
         &self,
         hash: alloy::primitives::BlockHash,
     ) -> Result<Option<N::BlockResponse>, RpcError<TransportErrorKind>> {
-        debug!("SafeProvider eth_getBlockByHash called with hash: {:?}", hash);
+        info!("eth_getBlockByHash called");
         let provider = self.provider.clone();
         let result = self
             .retry_with_total_timeout(|| async { provider.get_block_by_hash(hash).await })
             .await;
         if let Err(e) = &result {
-            error!("SafeProvider eth_getBlockByHash failed: {}", e);
+            error!("eth_getBlockByHash failed: {}", e);
         }
         result
     }
@@ -157,12 +152,12 @@ impl<N: Network> SafeProvider<N> {
         &self,
         filter: &Filter,
     ) -> Result<Vec<Log>, RpcError<TransportErrorKind>> {
-        debug!("eth_getLogs called with filter: {:?}", filter);
+        info!("eth_getLogs called");
         let provider = self.provider.clone();
         let result =
             self.retry_with_total_timeout(|| async { provider.get_logs(filter).await }).await;
         if let Err(e) = &result {
-            error!("SafeProvider eth_getLogs failed: {}", e);
+            error!("eth_getLogs failed: {}", e);
         }
         result
     }
@@ -175,12 +170,12 @@ impl<N: Network> SafeProvider<N> {
     pub async fn subscribe_blocks(
         &self,
     ) -> Result<Subscription<N::HeaderResponse>, RpcError<TransportErrorKind>> {
-        debug!("eth_subscribe called");
+        info!("eth_subscribe called");
         let provider = self.provider.clone();
         let result =
             self.retry_with_total_timeout(|| async { provider.subscribe_blocks().await }).await;
         if let Err(e) = &result {
-            error!("SafeProvider eth_subscribe failed: {}", e);
+            error!("eth_subscribe failed: {}", e);
         }
         result
     }
From 855e16746177a164c3bc5c531966dd646c09b855 Mon Sep 17 00:00:00 2001
From: Leo
Date: Thu, 23 Oct 2025 20:20:08 +0900
Subject: [PATCH 018/122] ref: collapse timeout fn to one

---
 src/safe_provider.rs | 71 ++++++++++++++++++++------------------------
 1 file changed, 33 insertions(+), 38 deletions(-)

diff --git a/src/safe_provider.rs b/src/safe_provider.rs
index 9cc72e49..9eca83be 100644
--- a/src/safe_provider.rs
+++ b/src/safe_provider.rs
@@ -90,8 +90,9 @@ impl<N: Network> SafeProvider<N> {
     /// Fetch a block by number with retry and timeout.
     ///
     /// # Errors
-    /// Returns `RpcError` if the RPC call fails
-    /// after exhausting retries or times out.
+    ///
+    /// Returns an error if RPC call fails repeatedly even
+    /// after exhausting retries or if the call times out.
     pub async fn get_block_by_number(
         &self,
         number: BlockNumberOrTag,
@@ -111,8 +112,9 @@ impl<N: Network> SafeProvider<N> {
     /// Fetch the latest block number with retry and timeout.
     ///
     /// # Errors
-    /// Returns `RpcError` if the RPC call fails
-    /// after exhausting retries or times out.
+    ///
+    /// Returns an error if RPC call fails repeatedly even
+    /// after exhausting retries or if the call times out.
     pub async fn get_block_number(&self) -> Result<u64, RpcError<TransportErrorKind>> {
         info!("eth_getBlockNumber called");
         let provider = self.provider.clone();
@@ -126,8 +128,9 @@ impl<N: Network> SafeProvider<N> {
     /// Fetch a block by hash with retry and timeout.
     ///
     /// # Errors
-    /// Returns `RpcError` if the RPC call fails
-    /// after exhausting retries or times out.
+    ///
+    /// Returns an error if RPC call fails repeatedly even
+    /// after exhausting retries or if the call times out.
     pub async fn get_block_by_hash(
         &self,
         hash: alloy::primitives::BlockHash,
@@ -146,8 +149,9 @@ impl<N: Network> SafeProvider<N> {
     /// Fetch logs for the given filter with retry and timeout.
     ///
     /// # Errors
-    /// Returns `RpcError` if the RPC call fails
-    /// after exhausting retries or times out.
+    ///
+    /// Returns an error if RPC call fails repeatedly even
+    /// after exhausting retries or if the call times out.
     pub async fn get_logs(
         &self,
         filter: &Filter,
@@ -165,8 +169,9 @@ impl<N: Network> SafeProvider<N> {
     /// Subscribe to new block headers with retry and timeout.
     ///
     /// # Errors
-    /// Returns `RpcError` if the subscription
-    /// cannot be established after retries or times out.
+    ///
+    /// Returns an error if RPC call fails repeatedly even
+    /// after exhausting retries or if the call times out.
     pub async fn subscribe_blocks(
         &self,
     ) -> Result<Subscription<N::HeaderResponse>, RpcError<TransportErrorKind>> {
@@ -180,36 +185,17 @@ impl<N: Network> SafeProvider<N> {
         result
     }
 
-    /// Execute `operation` with exponential backoff respecting only the backoff budget.
-    ///
-    /// # Errors
-    /// Returns `RpcError` if all attempts fail or the
-    /// cumulative backoff delay exceeds the configured budget.
-    async fn retry_with_timeout<T, F, Fut>(
-        &self,
-        operation: F,
-    ) -> Result<T, RpcError<TransportErrorKind>>
-    where
-        F: Fn() -> Fut,
-        Fut: Future<Output = Result<T, RpcError<TransportErrorKind>>>,
-    {
-        let retry_strategy = ExponentialBuilder::default()
-            .with_max_times(self.max_retries)
-            .with_min_delay(self.retry_interval);
-
-        operation.retry(retry_strategy).sleep(tokio::time::sleep).await
-    }
-
-    /// Execute `operation` with exponential backoff and a true total timeout.
+    /// Execute `operation` with exponential backoff and a total timeout.
     ///
     /// Wraps the retry logic with `tokio::time::timeout(self.max_timeout, ...)` so
     /// the entire operation (including time spent inside the RPC call) cannot exceed
     /// `max_timeout`.
     ///
     /// # Errors
-    /// - Returns `RpcError` with message "total operation timeout exceeded" if
-    ///   the overall timeout elapses.
-    /// - Propagates any `RpcError` from the underlying retries.
+    ///
+    /// - Returns [`RpcError`] with message "total operation timeout exceeded"
+    ///   if the overall timeout elapses.
+    /// - Propagates any [`RpcError`] from the underlying retries.
async fn retry_with_total_timeout( &self, operation: F, @@ -218,7 +204,16 @@ impl SafeProvider { F: Fn() -> Fut, Fut: Future>>, { - match tokio::time::timeout(self.max_timeout, self.retry_with_timeout(operation)).await { + let retry_strategy = ExponentialBuilder::default() + .with_max_times(self.max_retries) + .with_min_delay(self.retry_interval); + + match tokio::time::timeout( + self.max_timeout, + operation.retry(retry_strategy).sleep(tokio::time::sleep), + ) + .await + { Ok(res) => res, Err(_) => Err(TransportErrorKind::custom_str("total operation timeout exceeded")), } @@ -254,7 +249,7 @@ mod tests { let call_count_clone = call_count.clone(); let result = provider - .retry_with_timeout(move || { + .retry_with_total_timeout(move || { let count = call_count_clone.clone(); async move { let mut c = count.lock().unwrap(); @@ -278,7 +273,7 @@ mod tests { let call_count_clone = call_count.clone(); let result = provider - .retry_with_timeout(move || { + .retry_with_total_timeout(move || { let count = call_count_clone.clone(); async move { let mut c = count.lock().unwrap(); @@ -306,7 +301,7 @@ mod tests { let call_count_clone = call_count.clone(); let result = provider - .retry_with_timeout(move || { + .retry_with_total_timeout(move || { let count = call_count_clone.clone(); async move { let mut c = count.lock().unwrap(); From ff40e8aad6d41a420c2c4f25466682c0802c252c Mon Sep 17 00:00:00 2001 From: Leo Date: Thu, 23 Oct 2025 20:24:15 +0900 Subject: [PATCH 019/122] ref: better syntax --- src/safe_provider.rs | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/src/safe_provider.rs b/src/safe_provider.rs index 9eca83be..eb5241ce 100644 --- a/src/safe_provider.rs +++ b/src/safe_provider.rs @@ -116,9 +116,8 @@ impl SafeProvider { /// after exhausting retries or if the call times out. 
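// [editor's note] The collapsed helper above wraps backon's retry future in a
// single tokio timeout, so backoff sleeps count against the same budget. A
// standalone sketch of that pattern, with assumed budgets and a stand-in
// operation (not the crate's code):
//
//     use backon::{ExponentialBuilder, Retryable};
//     use std::time::Duration;
//
//     async fn with_budget() -> Option<u64> {
//         let strategy = ExponentialBuilder::default()
//             .with_max_times(3)
//             .with_min_delay(Duration::from_millis(100));
//         let op = || async { Ok::<_, std::io::Error>(42u64) }; // stand-in RPC call
//         // one timeout spans *all* attempts, including the backoff sleeps
//         tokio::time::timeout(Duration::from_secs(5), op.retry(strategy).sleep(tokio::time::sleep))
//             .await // Err(Elapsed) once the total budget is exceeded
//             .ok()?
//             .ok()
//     }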
pub async fn get_block_number(&self) -> Result> { info!("eth_getBlockNumber called"); - let provider = self.provider.clone(); - let result = - self.retry_with_total_timeout(|| async { provider.get_block_number().await }).await; + let operation = || self.provider.get_block_number(); + let result = self.retry_with_total_timeout(operation).await; if let Err(e) = &result { error!("eth_getBlockNumber failed: {}", e); } From 992853caf99d17f5ba0821c4c07f06a817ea4d16 Mon Sep 17 00:00:00 2001 From: Leo Date: Thu, 23 Oct 2025 20:27:54 +0900 Subject: [PATCH 020/122] ref: remove with and address default nit --- src/block_range_scanner.rs | 9 +++------ src/safe_provider.rs | 6 +++--- 2 files changed, 6 insertions(+), 9 deletions(-) diff --git a/src/block_range_scanner.rs b/src/block_range_scanner.rs index 50368d66..9c86fb93 100644 --- a/src/block_range_scanner.rs +++ b/src/block_range_scanner.rs @@ -331,9 +331,9 @@ impl BlockRangeScanner { provider: RootProvider, ) -> TransportResult> { let safe_provider = SafeProvider::new(provider) - .with_max_timeout(self.max_timeout) - .with_max_retries(self.max_retries) - .with_retry_interval(self.retry_interval); + .max_timeout(self.max_timeout) + .max_retries(self.max_retries) + .retry_interval(self.retry_interval); Ok(ConnectedBlockRangeScanner { provider: safe_provider, @@ -1138,9 +1138,6 @@ mod tests { fn mocked_provider(asserter: Asserter) -> SafeProvider { let root_provider = RootProvider::new(RpcClient::mocked(asserter)); SafeProvider::new(root_provider) - .with_max_timeout(DEFAULT_MAX_TIMEOUT) - .with_max_retries(DEFAULT_MAX_RETRIES) - .with_retry_interval(DEFAULT_RETRY_INTERVAL) } #[test] diff --git a/src/safe_provider.rs b/src/safe_provider.rs index eb5241ce..41e48b9a 100644 --- a/src/safe_provider.rs +++ b/src/safe_provider.rs @@ -70,19 +70,19 @@ impl SafeProvider { } #[must_use] - pub fn with_max_timeout(mut self, timeout: Duration) -> Self { + pub fn max_timeout(mut self, timeout: Duration) -> Self { self.max_timeout = timeout; self } #[must_use] - pub fn with_max_retries(mut self, max_retries: usize) -> Self { + pub fn max_retries(mut self, max_retries: usize) -> Self { self.max_retries = max_retries; self } #[must_use] - pub fn with_retry_interval(mut self, retry_interval: Duration) -> Self { + pub fn retry_interval(mut self, retry_interval: Duration) -> Self { self.retry_interval = retry_interval; self } From ab35dc52aefe1add867b0a071874610b40c316f3 Mon Sep 17 00:00:00 2001 From: Leo Date: Thu, 23 Oct 2025 20:31:09 +0900 Subject: [PATCH 021/122] fix: doctest --- src/safe_provider.rs | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/src/safe_provider.rs b/src/safe_provider.rs index 41e48b9a..9e4f2798 100644 --- a/src/safe_provider.rs +++ b/src/safe_provider.rs @@ -18,9 +18,8 @@ //! let provider = RootProvider::::new( //! ClientBuilder::default().ws(WsConnect::new("wss://localhost:8000")).await?, //! ); -//! let safe_provider = SafeProvider::new(provider) -//! .with_max_timeout(Duration::from_secs(30)) -//! .with_max_retries(5); +//! let safe_provider = +//! SafeProvider::new(provider).max_timeout(Duration::from_secs(30)).max_retries(5); //! //! let block = safe_provider.get_block_by_number(12345.into()).await?; //! 
Ok(()) From cdf9b9539ebc21de67dd15cab71222617e7d24f1 Mon Sep 17 00:00:00 2001 From: Leo Date: Thu, 23 Oct 2025 22:39:49 +0900 Subject: [PATCH 022/122] Update src/safe_provider.rs Co-authored-by: Nenad --- src/safe_provider.rs | 33 +++++++++++++-------------------- 1 file changed, 13 insertions(+), 20 deletions(-) diff --git a/src/safe_provider.rs b/src/safe_provider.rs index 41e48b9a..b19195b7 100644 --- a/src/safe_provider.rs +++ b/src/safe_provider.rs @@ -226,41 +226,34 @@ mod tests { use std::sync::{Arc, Mutex}; use tokio::time::sleep; - fn create_test_provider( - timeout: Duration, + fn test_provider( + timeout: u64, max_retries: usize, - retry_interval: Duration, + retry_interval: u64, ) -> SafeProvider { SafeProvider { - provider: RootProvider::::new_http("http://localhost:8545".parse().unwrap()), - max_timeout: timeout, + provider: RootProvider::new_http("http://localhost:8545".parse().unwrap()), + max_timeout: Duration::from_millis(timeout), max_retries, - retry_interval, + retry_interval: Duration::from_millis(retry_interval), } } #[tokio::test] async fn test_retry_with_timeout_succeeds_on_first_attempt() { - let provider = - create_test_provider(Duration::from_millis(100), 3, Duration::from_millis(10)); + let provider = test_provider(100, 3, 10); - let call_count = Arc::new(Mutex::new(0)); - let call_count_clone = call_count.clone(); + let call_count = AtomicUsize::new(0); let result = provider - .retry_with_total_timeout(move || { - let count = call_count_clone.clone(); - async move { - let mut c = count.lock().unwrap(); - *c += 1; - Ok(42) - } + .retry_with_timeout(|| async { + call_count.fetch_add(1, Ordering::SeqCst); + Ok(42) }) .await; - assert!(result.is_ok()); - assert_eq!(result.unwrap(), 42); - assert_eq!(*call_count.lock().unwrap(), 1); + assert!(matches!(result, Ok(42))); + assert_eq!(call_count.load(Ordering::SeqCst), 1); } #[tokio::test] From 74cd3d7766ddc2c5673a3357d09d547eff4db9bd Mon Sep 17 00:00:00 2001 From: Leo Date: Thu, 23 Oct 2025 22:51:21 +0900 Subject: [PATCH 023/122] ref: use atomic usize --- src/safe_provider.rs | 56 +++++++++++++++++--------------------------- 1 file changed, 22 insertions(+), 34 deletions(-) diff --git a/src/safe_provider.rs b/src/safe_provider.rs index cfe3a258..00483d63 100644 --- a/src/safe_provider.rs +++ b/src/safe_provider.rs @@ -222,7 +222,7 @@ impl SafeProvider { mod tests { use super::*; use alloy::network::Ethereum; - use std::sync::{Arc, Mutex}; + use std::sync::atomic::{AtomicUsize, Ordering}; use tokio::time::sleep; fn test_provider( @@ -245,7 +245,7 @@ mod tests { let call_count = AtomicUsize::new(0); let result = provider - .retry_with_timeout(|| async { + .retry_with_total_timeout(|| async { call_count.fetch_add(1, Ordering::SeqCst); Ok(42) }) @@ -257,65 +257,53 @@ mod tests { #[tokio::test] async fn test_retry_with_timeout_retries_on_error() { - let provider = - create_test_provider(Duration::from_millis(100), 3, Duration::from_millis(10)); + let provider = test_provider(100, 3, 10); - let call_count = Arc::new(Mutex::new(0)); - let call_count_clone = call_count.clone(); + let call_count = AtomicUsize::new(0); let result = provider - .retry_with_total_timeout(move || { - let count = call_count_clone.clone(); - async move { - let mut c = count.lock().unwrap(); - *c += 1; - if *c < 3 { - Err(TransportErrorKind::custom_str("temporary error")) - } else { - Ok(42) - } + .retry_with_total_timeout(|| async { + call_count.fetch_add(1, Ordering::SeqCst); + if call_count.load(Ordering::SeqCst) < 3 { + 
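                    // [editor's note] fail the first two attempts so the retry
                    // loop must re-invoke the closure; the AtomicUsize (vs. the
                    // old Arc<Mutex<_>>) lets each attempt bump the counter
                    // lock-free from inside the retried future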
Err(TransportErrorKind::custom_str("temporary error")) + } else { + Ok(42) } }) .await; assert!(result.is_ok()); assert_eq!(result.unwrap(), 42); - assert_eq!(*call_count.lock().unwrap(), 3); + assert_eq!(call_count.load(Ordering::SeqCst), 3); } #[tokio::test] async fn test_retry_with_timeout_fails_after_max_retries() { - let provider = - create_test_provider(Duration::from_millis(100), 2, Duration::from_millis(10)); + let provider = test_provider(100, 2, 10); - let call_count = Arc::new(Mutex::new(0)); - let call_count_clone = call_count.clone(); + let call_count = AtomicUsize::new(0); let result = provider - .retry_with_total_timeout(move || { - let count = call_count_clone.clone(); - async move { - let mut c = count.lock().unwrap(); - *c += 1; - Err::>(TransportErrorKind::custom_str( - "permanent error", - )) - } + .retry_with_total_timeout(|| async { + call_count.fetch_add(1, Ordering::SeqCst); + Err::>(TransportErrorKind::custom_str( + "permanent error", + )) }) .await; assert!(result.is_err()); - assert_eq!(*call_count.lock().unwrap(), 3); + assert_eq!(call_count.load(Ordering::SeqCst), 3); } #[tokio::test] async fn test_retry_with_timeout_respects_total_delay() { - let max_timeout = Duration::from_millis(50); - let provider = create_test_provider(max_timeout, 10, Duration::from_millis(1)); + let max_timeout = 50; + let provider = test_provider(max_timeout, 10, 1); let result = provider .retry_with_total_timeout(move || async move { - sleep(max_timeout + Duration::from_millis(10)).await; + sleep(Duration::from_millis(max_timeout + 10)).await; Ok(42) }) .await; From 41cc7339bb83ede727cded2958792a9f14ecf834 Mon Sep 17 00:00:00 2001 From: Leo Date: Thu, 23 Oct 2025 22:57:45 +0900 Subject: [PATCH 024/122] ref: update test to match for error --- src/safe_provider.rs | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/src/safe_provider.rs b/src/safe_provider.rs index 00483d63..3b48bb19 100644 --- a/src/safe_provider.rs +++ b/src/safe_provider.rs @@ -292,7 +292,8 @@ mod tests { }) .await; - assert!(result.is_err()); + let err = result.unwrap_err(); + assert!(err.to_string().contains("permanent error"),); assert_eq!(call_count.load(Ordering::SeqCst), 3); } @@ -308,6 +309,7 @@ mod tests { }) .await; - assert!(result.is_err()); + let err = result.unwrap_err(); + assert!(err.to_string().contains("total operation timeout exceeded"),); } } From 4af01ab4a03b79879b8f178e9755c7b7b742a6e7 Mon Sep 17 00:00:00 2001 From: Leo Date: Thu, 23 Oct 2025 23:08:30 +0900 Subject: [PATCH 025/122] ref: update doc --- src/safe_provider.rs | 72 +++++++++++++++++++++----------------------- 1 file changed, 35 insertions(+), 37 deletions(-) diff --git a/src/safe_provider.rs b/src/safe_provider.rs index 3b48bb19..83605185 100644 --- a/src/safe_provider.rs +++ b/src/safe_provider.rs @@ -1,31 +1,3 @@ -//! Safe provider wrapper with built-in retry and timeout mechanisms. -//! -//! This module provides a wrapper around Alloy providers that automatically -//! handles retries, timeouts, and error logging for RPC calls. -//! -//! # Example -//! -//! ```rust,no_run -//! use alloy::{ -//! network::Ethereum, -//! providers::{RootProvider, WsConnect}, -//! rpc::client::ClientBuilder, -//! }; -//! use event_scanner::safe_provider::SafeProvider; -//! use std::time::Duration; -//! -//! async fn example() -> Result<(), Box> { -//! let provider = RootProvider::::new( -//! ClientBuilder::default().ws(WsConnect::new("wss://localhost:8000")).await?, -//! ); -//! let safe_provider = -//! 
SafeProvider::new(provider).max_timeout(Duration::from_secs(30)).max_retries(5); -//! -//! let block = safe_provider.get_block_by_number(12345.into()).await?; -//! Ok(()) -//! } -//! ``` - use std::{future::Future, time::Duration}; use alloy::{ @@ -39,15 +11,33 @@ use alloy::{ use backon::{ExponentialBuilder, Retryable}; use tracing::{error, info}; -// RPC retry and timeout settings -/// Default timeout used by `SafeProvider` -pub const DEFAULT_MAX_TIMEOUT: Duration = Duration::from_secs(30); -/// Default maximum number of retry attempts. -pub const DEFAULT_MAX_RETRIES: usize = 5; -/// Default base delay between retries. -pub const DEFAULT_RETRY_INTERVAL: Duration = Duration::from_secs(1); - -/// Provider wrapper adding retries and timeouts. +/// Safe provider wrapper with built-in retry and timeout mechanisms. +/// +/// This wrapper around Alloy providers automatically handles retries, +/// timeouts, and error logging for RPC calls. +/// +/// # Example +/// +/// ```rust,no_run +/// # use alloy::{ +/// # network::Ethereum, +/// # providers::{RootProvider, WsConnect}, +/// # rpc::client::ClientBuilder, +/// # }; +/// # use event_scanner::safe_provider::SafeProvider; +/// # use std::time::Duration; +/// +/// async fn create_safe_provider() -> Result<(), Box> { +/// let provider = RootProvider::::new( +/// ClientBuilder::default().ws(WsConnect::new("wss://localhost:8000")).await?, +/// ); +/// let safe_provider = +/// SafeProvider::new(provider).max_timeout(Duration::from_secs(30)).max_retries(5); +/// +/// let block = safe_provider.get_block_by_number(12345.into()).await?; +/// Ok(()) +/// } +/// ``` #[derive(Clone)] pub struct SafeProvider { provider: RootProvider, @@ -56,6 +46,14 @@ pub struct SafeProvider { retry_interval: Duration, } +// RPC retry and timeout settings +/// Default timeout used by `SafeProvider` +pub const DEFAULT_MAX_TIMEOUT: Duration = Duration::from_secs(30); +/// Default maximum number of retry attempts. +pub const DEFAULT_MAX_RETRIES: usize = 5; +/// Default base delay between retries. +pub const DEFAULT_RETRY_INTERVAL: Duration = Duration::from_secs(1); + impl SafeProvider { /// Create a new `SafeProvider` with default settings. 
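// [editor's note] Hedged construction sketch using the defaults above, then
// overriding a single knob (the `root_provider` name is illustrative):
//
//     let safe = SafeProvider::new(root_provider) // 30s budget, 5 retries, 1s base delay
//         .max_retries(3);                        // setters chain; the last call wins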
#[must_use] From 3329c3847ce8f1d143bb9c856437dec644e3537e Mon Sep 17 00:00:00 2001 From: Leo Date: Thu, 23 Oct 2025 23:21:33 +0900 Subject: [PATCH 026/122] fix: merge errors with connect methods --- src/block_range_scanner.rs | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/src/block_range_scanner.rs b/src/block_range_scanner.rs index f6e99b87..1fe78f56 100644 --- a/src/block_range_scanner.rs +++ b/src/block_range_scanner.rs @@ -82,6 +82,7 @@ use alloy::{ eips::BlockNumberOrTag, network::{BlockResponse, Network, primitives::HeaderResponse}, primitives::{B256, BlockNumber}, + providers::RootProvider, pubsub::Subscription, rpc::client::ClientBuilder, transports::{ @@ -173,7 +174,7 @@ impl BlockRangeScanner { ws_url: Url, ) -> TransportResult> { let provider = - SafeProvider::::new(ClientBuilder::default().ws(WsConnect::new(ws_url)).await?); + RootProvider::::new(ClientBuilder::default().ws(WsConnect::new(ws_url)).await?); Ok(self.connect(provider)) } @@ -186,7 +187,7 @@ impl BlockRangeScanner { self, ipc_path: String, ) -> Result, RpcError> { - let provider = SafeProvider::::new(ClientBuilder::default().ipc(ipc_path.into()).await?); + let provider = RootProvider::::new(ClientBuilder::default().ipc(ipc_path.into()).await?); Ok(self.connect(provider)) } @@ -196,8 +197,15 @@ impl BlockRangeScanner { /// /// Returns an error if the connection fails #[must_use] - pub fn connect(self, provider: SafeProvider) -> ConnectedBlockRangeScanner { - ConnectedBlockRangeScanner { provider, max_block_range: self.max_block_range } + pub fn connect(self, provider: RootProvider) -> ConnectedBlockRangeScanner { + let safe_provider = SafeProvider::new(provider) + .max_timeout(self.max_timeout) + .max_retries(self.max_retries) + .retry_interval(self.retry_interval); + ConnectedBlockRangeScanner { + provider: safe_provider, + max_block_range: self.max_block_range, + } } } From 586b03e6f21804b4387d4388b77e3bc8e187a54a Mon Sep 17 00:00:00 2001 From: Leo Date: Thu, 23 Oct 2025 23:22:33 +0900 Subject: [PATCH 027/122] fix: root --> safe provider --- src/event_scanner/modes/common.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/event_scanner/modes/common.rs b/src/event_scanner/modes/common.rs index a841f4fb..b31c4ded 100644 --- a/src/event_scanner/modes/common.rs +++ b/src/event_scanner/modes/common.rs @@ -3,10 +3,10 @@ use std::ops::RangeInclusive; use crate::{ block_range_scanner::{MAX_BUFFERED_MESSAGES, Message as BlockRangeMessage}, event_scanner::{filter::EventFilter, listener::EventListener, message::Message}, + safe_provider::SafeProvider, }; use alloy::{ network::Network, - providers::{Provider, RootProvider}, rpc::types::{Filter, Log}, transports::{RpcError, TransportErrorKind}, }; @@ -25,7 +25,7 @@ pub enum ConsumerMode { pub async fn handle_stream( mut stream: ReceiverStream, - provider: &RootProvider, + provider: &SafeProvider, listeners: &[EventListener], mode: ConsumerMode, ) { @@ -42,7 +42,7 @@ pub async fn handle_stream( } pub fn spawn_log_consumers( - provider: &RootProvider, + provider: &SafeProvider, listeners: &[EventListener], range_tx: &Sender, mode: ConsumerMode, @@ -129,7 +129,7 @@ async fn get_logs( range: RangeInclusive, event_filter: &EventFilter, log_filter: &Filter, - provider: &RootProvider, + provider: &SafeProvider, ) -> Result, RpcError> { let log_filter = log_filter.clone().from_block(*range.start()).to_block(*range.end()); From 8a080e45d091a3a598e58126019debda4722b5cf Mon Sep 17 00:00:00 2001 From: Leo Date: Thu, 23 Oct 
2025 23:28:23 +0900 Subject: [PATCH 028/122] ref: tracing update --- src/lib.rs | 2 +- src/safe_provider.rs | 10 +++++----- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/src/lib.rs b/src/lib.rs index 5034a7e1..28165f66 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -1,7 +1,7 @@ pub mod block_range_scanner; pub mod error; pub mod event_scanner; -pub mod safe_provider; +mod safe_provider; #[cfg(any(test, feature = "test-utils"))] pub mod test_utils; pub mod types; diff --git a/src/safe_provider.rs b/src/safe_provider.rs index 83605185..539b75c4 100644 --- a/src/safe_provider.rs +++ b/src/safe_provider.rs @@ -100,7 +100,7 @@ impl SafeProvider { .retry_with_total_timeout(|| async { provider.get_block_by_number(number).await }) .await; if let Err(e) = &result { - error!("eth_getByBlockNumber failed: {}", e); + error!(error = %e, "eth_getByBlockNumber failed"); } result } @@ -116,7 +116,7 @@ impl SafeProvider { let operation = || self.provider.get_block_number(); let result = self.retry_with_total_timeout(operation).await; if let Err(e) = &result { - error!("eth_getBlockNumber failed: {}", e); + error!(error = %e, "eth_getBlockNumber failed"); } result } @@ -137,7 +137,7 @@ impl SafeProvider { .retry_with_total_timeout(|| async { provider.get_block_by_hash(hash).await }) .await; if let Err(e) = &result { - error!("eth_getBlockByHash failed: {}", e); + error!(error = %e, "eth_getBlockByHash failed"); } result } @@ -157,7 +157,7 @@ impl SafeProvider { let result = self.retry_with_total_timeout(|| async { provider.get_logs(filter).await }).await; if let Err(e) = &result { - error!("eth_getLogs failed: {}", e); + error!(error = %e, "eth_getLogs failed"); } result } @@ -176,7 +176,7 @@ impl SafeProvider { let result = self.retry_with_total_timeout(|| async { provider.subscribe_blocks().await }).await; if let Err(e) = &result { - error!("eth_subscribe failed: {}", e); + error!(error = %e, "eth_subscribe failed"); } result } From 982ea96c67b06376e082a515bc2ecc53b6d9137a Mon Sep 17 00:00:00 2001 From: Leo Date: Thu, 23 Oct 2025 23:29:37 +0900 Subject: [PATCH 029/122] ref: remove doc --- src/safe_provider.rs | 23 ----------------------- 1 file changed, 23 deletions(-) diff --git a/src/safe_provider.rs b/src/safe_provider.rs index 539b75c4..4b310395 100644 --- a/src/safe_provider.rs +++ b/src/safe_provider.rs @@ -15,29 +15,6 @@ use tracing::{error, info}; /// /// This wrapper around Alloy providers automatically handles retries, /// timeouts, and error logging for RPC calls. 
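// [editor's note] The structured-field logging adopted in patch 028, e.g.
// `error!(error = %e, "eth_getLogs failed")`, records the error through its
// Display impl as a separate field rather than interpolating it into the
// message. A minimal subscriber that surfaces those fields, assuming the
// fmt + env-filter features tracing-subscriber is already built with:
//
//     tracing_subscriber::fmt().with_env_filter("event_scanner=info").init();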
-/// -/// # Example -/// -/// ```rust,no_run -/// # use alloy::{ -/// # network::Ethereum, -/// # providers::{RootProvider, WsConnect}, -/// # rpc::client::ClientBuilder, -/// # }; -/// # use event_scanner::safe_provider::SafeProvider; -/// # use std::time::Duration; -/// -/// async fn create_safe_provider() -> Result<(), Box> { -/// let provider = RootProvider::::new( -/// ClientBuilder::default().ws(WsConnect::new("wss://localhost:8000")).await?, -/// ); -/// let safe_provider = -/// SafeProvider::new(provider).max_timeout(Duration::from_secs(30)).max_retries(5); -/// -/// let block = safe_provider.get_block_by_number(12345.into()).await?; -/// Ok(()) -/// } -/// ``` #[derive(Clone)] pub struct SafeProvider { provider: RootProvider, From f51c107647309cd6ecdafa7aa12507a1a70e4da6 Mon Sep 17 00:00:00 2001 From: Leo Date: Mon, 27 Oct 2025 18:52:49 +0900 Subject: [PATCH 030/122] feat: add fallbackprovider --- src/safe_provider.rs | 161 +++++++++++++++++++++++++++++++++---------- 1 file changed, 126 insertions(+), 35 deletions(-) diff --git a/src/safe_provider.rs b/src/safe_provider.rs index 4b310395..bbb47843 100644 --- a/src/safe_provider.rs +++ b/src/safe_provider.rs @@ -21,6 +21,7 @@ pub struct SafeProvider { max_timeout: Duration, max_retries: usize, retry_interval: Duration, + fallback_providers: Vec>, } // RPC retry and timeout settings @@ -40,6 +41,7 @@ impl SafeProvider { max_timeout: DEFAULT_MAX_TIMEOUT, max_retries: DEFAULT_MAX_RETRIES, retry_interval: DEFAULT_RETRY_INTERVAL, + fallback_providers: Vec::new(), } } @@ -61,6 +63,15 @@ impl SafeProvider { self } + /// Add a fallback provider to the list. + /// + /// Fallback providers are used when the primary provider times out. + #[must_use] + pub fn fallback_provider(mut self, provider: RootProvider) -> Self { + self.fallback_providers.push(provider); + self + } + /// Fetch a block by number with retry and timeout. /// /// # Errors @@ -72,9 +83,10 @@ impl SafeProvider { number: BlockNumberOrTag, ) -> Result, RpcError> { info!("eth_getBlockByNumber called"); - let provider = self.provider.clone(); let result = self - .retry_with_total_timeout(|| async { provider.get_block_by_number(number).await }) + .retry_with_total_timeout(move |provider| async move { + provider.get_block_by_number(number).await + }) .await; if let Err(e) = &result { error!(error = %e, "eth_getByBlockNumber failed"); @@ -90,8 +102,11 @@ impl SafeProvider { /// after exhausting retries or if the call times out. 
pub async fn get_block_number(&self) -> Result> { info!("eth_getBlockNumber called"); - let operation = || self.provider.get_block_number(); - let result = self.retry_with_total_timeout(operation).await; + let result = self + .retry_with_total_timeout( + move |provider| async move { provider.get_block_number().await }, + ) + .await; if let Err(e) = &result { error!(error = %e, "eth_getBlockNumber failed"); } @@ -109,9 +124,10 @@ impl SafeProvider { hash: alloy::primitives::BlockHash, ) -> Result, RpcError> { info!("eth_getBlockByHash called"); - let provider = self.provider.clone(); let result = self - .retry_with_total_timeout(|| async { provider.get_block_by_hash(hash).await }) + .retry_with_total_timeout(move |provider| async move { + provider.get_block_by_hash(hash).await + }) .await; if let Err(e) = &result { error!(error = %e, "eth_getBlockByHash failed"); @@ -130,9 +146,12 @@ impl SafeProvider { filter: &Filter, ) -> Result, RpcError> { info!("eth_getLogs called"); - let provider = self.provider.clone(); - let result = - self.retry_with_total_timeout(|| async { provider.get_logs(filter).await }).await; + let result = self + .retry_with_total_timeout(move |provider| { + let filter = filter.clone(); + async move { provider.get_logs(&filter).await } + }) + .await; if let Err(e) = &result { error!(error = %e, "eth_getLogs failed"); } @@ -149,9 +168,11 @@ impl SafeProvider { &self, ) -> Result, RpcError> { info!("eth_subscribe called"); - let provider = self.provider.clone(); - let result = - self.retry_with_total_timeout(|| async { provider.subscribe_blocks().await }).await; + let result = self + .retry_with_total_timeout( + move |provider| async move { provider.subscribe_blocks().await }, + ) + .await; if let Err(e) = &result { error!(error = %e, "eth_subscribe failed"); } @@ -164,17 +185,71 @@ impl SafeProvider { /// the entire operation (including time spent inside the RPC call) cannot exceed /// `max_timeout`. /// + /// If the timeout is exceeded and fallback providers are available, it will + /// attempt to use each fallback provider in sequence. + /// /// # Errors /// - /// - Returns [`RpcError`] with message "total operation timeout exceeded" - /// if the overall timeout elapses. + /// - Returns [`RpcError`] with message "total operation timeout exceeded + /// and all fallback providers failed" if the overall timeout elapses and no fallback + /// providers succeed. /// - Propagates any [`RpcError`] from the underlying retries. 
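// [editor's note] Hedged sketch of the fallback chain documented above: the
// primary provider gets the full retry/timeout budget first, then each
// fallback is tried in registration order (provider names are illustrative):
//
//     let safe = SafeProvider::new(primary)
//         .fallback_provider(backup_a)
//         .fallback_provider(backup_b); // consulted in this order on failure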
async fn retry_with_total_timeout( &self, operation: F, ) -> Result> where - F: Fn() -> Fut, + F: Fn(RootProvider) -> Fut, + Fut: Future>>, + { + // Try primary provider first + let result = self.try_provider_with_timeout(&self.provider, &operation).await; + + if let Ok(value) = result { + return Ok(value); + } + + if result.is_err() && self.fallback_providers.is_empty() { + return result; + } + + info!( + "Primary provider failed, trying {} fallback provider(s)", + self.fallback_providers.len() + ); + + // Try each fallback provider + for (idx, fallback_provider) in self.fallback_providers.iter().enumerate() { + info!("Attempting fallback provider {}", idx + 1); + + let fallback_result = + self.try_provider_with_timeout(fallback_provider, &operation).await; + + match fallback_result { + Ok(value) => { + info!("Fallback provider {} succeeded", idx + 1); + return Ok(value); + } + Err(e) => { + error!("Fallback provider {} failed with error: {}", idx + 1, e); + } + } + } + + error!("All fallback providers failed or timed out"); + Err(TransportErrorKind::custom_str( + "total operation timeout exceeded and all fallback providers failed", + )) + } + + /// Try executing an operation with a specific provider with retry and timeout. + async fn try_provider_with_timeout( + &self, + provider: &RootProvider, + operation: F, + ) -> Result> + where + F: Fn(RootProvider) -> Fut, Fut: Future>>, { let retry_strategy = ExponentialBuilder::default() @@ -183,7 +258,7 @@ impl SafeProvider { match tokio::time::timeout( self.max_timeout, - operation.retry(retry_strategy).sleep(tokio::time::sleep), + (|| operation(provider.clone())).retry(retry_strategy).sleep(tokio::time::sleep), ) .await { @@ -197,7 +272,10 @@ impl SafeProvider { mod tests { use super::*; use alloy::network::Ethereum; - use std::sync::atomic::{AtomicUsize, Ordering}; + use std::sync::{ + Arc, + atomic::{AtomicUsize, Ordering}, + }; use tokio::time::sleep; fn test_provider( @@ -210,6 +288,7 @@ mod tests { max_timeout: Duration::from_millis(timeout), max_retries, retry_interval: Duration::from_millis(retry_interval), + fallback_providers: Vec::new(), } } @@ -217,12 +296,16 @@ mod tests { async fn test_retry_with_timeout_succeeds_on_first_attempt() { let provider = test_provider(100, 3, 10); - let call_count = AtomicUsize::new(0); + let call_count = Arc::new(AtomicUsize::new(0)); + let call_count_clone = call_count.clone(); let result = provider - .retry_with_total_timeout(|| async { - call_count.fetch_add(1, Ordering::SeqCst); - Ok(42) + .retry_with_total_timeout(move |_provider| { + let call_count = call_count_clone.clone(); + async move { + call_count.fetch_add(1, Ordering::SeqCst); + Ok(42) + } }) .await; @@ -234,15 +317,19 @@ mod tests { async fn test_retry_with_timeout_retries_on_error() { let provider = test_provider(100, 3, 10); - let call_count = AtomicUsize::new(0); + let call_count = Arc::new(AtomicUsize::new(0)); + let call_count_clone = call_count.clone(); let result = provider - .retry_with_total_timeout(|| async { - call_count.fetch_add(1, Ordering::SeqCst); - if call_count.load(Ordering::SeqCst) < 3 { - Err(TransportErrorKind::custom_str("temporary error")) - } else { - Ok(42) + .retry_with_total_timeout(move |_provider| { + let call_count = call_count_clone.clone(); + async move { + call_count.fetch_add(1, Ordering::SeqCst); + if call_count.load(Ordering::SeqCst) < 3 { + Err(TransportErrorKind::custom_str("temporary error")) + } else { + Ok(42) + } } }) .await; @@ -256,14 +343,18 @@ mod tests { async fn 
test_retry_with_timeout_fails_after_max_retries() { let provider = test_provider(100, 2, 10); - let call_count = AtomicUsize::new(0); + let call_count = Arc::new(AtomicUsize::new(0)); + let call_count_clone = call_count.clone(); let result = provider - .retry_with_total_timeout(|| async { - call_count.fetch_add(1, Ordering::SeqCst); - Err::>(TransportErrorKind::custom_str( - "permanent error", - )) + .retry_with_total_timeout(move |_provider| { + let call_count = call_count_clone.clone(); + async move { + call_count.fetch_add(1, Ordering::SeqCst); + Err::>(TransportErrorKind::custom_str( + "permanent error", + )) + } }) .await; @@ -278,7 +369,7 @@ mod tests { let provider = test_provider(max_timeout, 10, 1); let result = provider - .retry_with_total_timeout(move || async move { + .retry_with_total_timeout(move |_provider| async move { sleep(Duration::from_millis(max_timeout + 10)).await; Ok(42) }) From 49f06bc0cac45ad8dc8804382859a63b06f56e92 Mon Sep 17 00:00:00 2001 From: Leo Date: Mon, 27 Oct 2025 19:01:17 +0900 Subject: [PATCH 031/122] ref: avoid clone provider when possible --- src/safe_provider.rs | 17 ++++++----------- 1 file changed, 6 insertions(+), 11 deletions(-) diff --git a/src/safe_provider.rs b/src/safe_provider.rs index 4b310395..d454c32c 100644 --- a/src/safe_provider.rs +++ b/src/safe_provider.rs @@ -72,10 +72,8 @@ impl SafeProvider { number: BlockNumberOrTag, ) -> Result, RpcError> { info!("eth_getBlockByNumber called"); - let provider = self.provider.clone(); - let result = self - .retry_with_total_timeout(|| async { provider.get_block_by_number(number).await }) - .await; + let operation = async || self.provider.get_block_by_number(number).await; + let result = self.retry_with_total_timeout(operation).await; if let Err(e) = &result { error!(error = %e, "eth_getByBlockNumber failed"); } @@ -109,10 +107,8 @@ impl SafeProvider { hash: alloy::primitives::BlockHash, ) -> Result, RpcError> { info!("eth_getBlockByHash called"); - let provider = self.provider.clone(); - let result = self - .retry_with_total_timeout(|| async { provider.get_block_by_hash(hash).await }) - .await; + let operation = async || self.provider.get_block_by_hash(hash).await; + let result = self.retry_with_total_timeout(operation).await; if let Err(e) = &result { error!(error = %e, "eth_getBlockByHash failed"); } @@ -130,9 +126,8 @@ impl SafeProvider { filter: &Filter, ) -> Result, RpcError> { info!("eth_getLogs called"); - let provider = self.provider.clone(); - let result = - self.retry_with_total_timeout(|| async { provider.get_logs(filter).await }).await; + let operation = || self.provider.get_logs(filter); + let result = self.retry_with_total_timeout(operation).await; if let Err(e) = &result { error!(error = %e, "eth_getLogs failed"); } From 9566f5193f74db17c131eb995a0ef4be2c2f5ed6 Mon Sep 17 00:00:00 2001 From: Leo Date: Mon, 27 Oct 2025 19:21:33 +0900 Subject: [PATCH 032/122] ref: import --- src/safe_provider.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/safe_provider.rs b/src/safe_provider.rs index bbb47843..be5ba170 100644 --- a/src/safe_provider.rs +++ b/src/safe_provider.rs @@ -9,6 +9,7 @@ use alloy::{ transports::{RpcError, TransportErrorKind}, }; use backon::{ExponentialBuilder, Retryable}; +use tokio::time::timeout; use tracing::{error, info}; /// Safe provider wrapper with built-in retry and timeout mechanisms. 
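// [editor's note] Patch 031 above swaps `let provider = self.provider.clone()`
// for closures that borrow the provider on each attempt; where the method
// takes references, the closure can forward directly, as the patch does:
//
//     let operation = || self.provider.get_logs(filter); // a fresh future per retry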
@@ -256,7 +257,7 @@ impl SafeProvider { .with_max_times(self.max_retries) .with_min_delay(self.retry_interval); - match tokio::time::timeout( + match timeout( self.max_timeout, (|| operation(provider.clone())).retry(retry_strategy).sleep(tokio::time::sleep), ) From c87251f33f169c7d1f2698476f6ed857b8699b7e Mon Sep 17 00:00:00 2001 From: Leo Date: Mon, 27 Oct 2025 22:17:21 +0900 Subject: [PATCH 033/122] feat: add list of providers --- src/block_range_scanner.rs | 113 +++++++++++++++++--------------- src/event_scanner/modes/sync.rs | 12 ++-- 2 files changed, 67 insertions(+), 58 deletions(-) diff --git a/src/block_range_scanner.rs b/src/block_range_scanner.rs index 1fe78f56..62c8ac08 100644 --- a/src/block_range_scanner.rs +++ b/src/block_range_scanner.rs @@ -82,7 +82,7 @@ use alloy::{ eips::BlockNumberOrTag, network::{BlockResponse, Network, primitives::HeaderResponse}, primitives::{B256, BlockNumber}, - providers::RootProvider, + providers::{Provider, RootProvider}, pubsub::Subscription, rpc::client::ClientBuilder, transports::{ @@ -115,21 +115,22 @@ impl PartialEq> for Message { } } -#[derive(Clone, Copy)] -pub struct BlockRangeScanner { +#[derive(Clone)] +pub struct BlockRangeScanner { pub max_block_range: u64, pub max_timeout: Duration, pub max_retries: usize, pub retry_interval: Duration, + pub providers: Vec>, } -impl Default for BlockRangeScanner { +impl Default for BlockRangeScanner { fn default() -> Self { Self::new() } } -impl BlockRangeScanner { +impl BlockRangeScanner { #[must_use] pub fn new() -> Self { Self { @@ -137,6 +138,7 @@ impl BlockRangeScanner { max_timeout: DEFAULT_MAX_TIMEOUT, max_retries: DEFAULT_MAX_RETRIES, retry_interval: DEFAULT_RETRY_INTERVAL, + providers: Vec::new(), } } @@ -164,15 +166,18 @@ impl BlockRangeScanner { self } + #[must_use] + pub fn fallback_provider(mut self, provider: RootProvider) -> Self { + self.providers.push(provider); + self + } + /// Connects to the provider via WebSocket /// /// # Errors /// /// Returns an error if the connection fails - pub async fn connect_ws( - self, - ws_url: Url, - ) -> TransportResult> { + pub async fn connect_ws(self, ws_url: Url) -> TransportResult> { let provider = RootProvider::::new(ClientBuilder::default().ws(WsConnect::new(ws_url)).await?); Ok(self.connect(provider)) @@ -183,7 +188,8 @@ impl BlockRangeScanner { /// # Errors /// /// Returns an error if the connection fails - pub async fn connect_ipc( + #[must_use] + pub async fn connect_ipc( self, ipc_path: String, ) -> Result, RpcError> { @@ -191,13 +197,16 @@ impl BlockRangeScanner { Ok(self.connect(provider)) } + // pub fn fallback_provider(self, provider: RootProvider) -> Self {} + /// Connects to an existing provider /// /// # Errors /// - /// Returns an error if the connection fails + /// Returns an error if the connection fails or provider does not support pubsub. 
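// [editor's note] `expect_pubsub_frontend()` panics on transports without
// subscription support (e.g. plain HTTP), so `connect` below assumes a WS or
// IPC-backed provider. Hedged sketch (URL and names illustrative):
//
//     let ws = RootProvider::<Ethereum>::new(
//         ClientBuilder::default().ws(WsConnect::new(ws_url)).await?, // pubsub-capable
//     );
//     let scanner = BlockRangeScanner::<Ethereum>::new().connect(ws);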
#[must_use] - pub fn connect(self, provider: RootProvider) -> ConnectedBlockRangeScanner { + pub fn connect(self, provider: RootProvider) -> ConnectedBlockRangeScanner { + provider.client().expect_pubsub_frontend(); let safe_provider = SafeProvider::new(provider) .max_timeout(self.max_timeout) .max_retries(self.max_retries) @@ -1025,7 +1034,7 @@ mod tests { #[test] fn block_range_scanner_defaults_match_constants() { - let scanner = BlockRangeScanner::new(); + let scanner = BlockRangeScanner::::new(); assert_eq!(scanner.max_block_range, DEFAULT_MAX_BLOCK_RANGE); } @@ -1034,7 +1043,7 @@ mod tests { fn builder_methods_update_configuration() { let max_block_range = 42; - let scanner = BlockRangeScanner::new().max_block_range(max_block_range); + let scanner = BlockRangeScanner::::new().max_block_range(max_block_range); assert_eq!(scanner.max_block_range, max_block_range); } @@ -1103,8 +1112,8 @@ mod tests { async fn live_mode_processes_all_blocks() -> anyhow::Result<()> { let anvil = Anvil::new().block_time_f64(0.01).try_spawn()?; - let client = BlockRangeScanner::new() - .connect_ws::(anvil.ws_endpoint_url()) + let client = BlockRangeScanner::::new() + .connect_ws(anvil.ws_endpoint_url()) .await? .run()?; @@ -1138,8 +1147,8 @@ mod tests { let block_confirmations = 5; - let client = BlockRangeScanner::new() - .connect_ws::(anvil.ws_endpoint_url()) + let client = BlockRangeScanner::::new() + .connect_ws(anvil.ws_endpoint_url()) .await? .run()?; @@ -1176,8 +1185,8 @@ mod tests { let block_confirmations = 5; - let client = BlockRangeScanner::new() - .connect_ws::(anvil.ws_endpoint_url()) + let client = BlockRangeScanner::::new() + .connect_ws(anvil.ws_endpoint_url()) .await? .run()?; @@ -1214,8 +1223,8 @@ mod tests { let block_confirmations = 5; - let client = BlockRangeScanner::new() - .connect_ws::(anvil.ws_endpoint_url()) + let client = BlockRangeScanner::::new() + .connect_ws(anvil.ws_endpoint_url()) .await? .run()?; @@ -1256,8 +1265,8 @@ mod tests { let block_confirmations = 5; - let client = BlockRangeScanner::new() - .connect_ws::(anvil.ws_endpoint_url()) + let client = BlockRangeScanner::::new() + .connect_ws(anvil.ws_endpoint_url()) .await? .run()?; @@ -1300,8 +1309,8 @@ mod tests { let block_confirmations = 3; - let client = BlockRangeScanner::new() - .connect_ws::(anvil.ws_endpoint_url()) + let client = BlockRangeScanner::::new() + .connect_ws(anvil.ws_endpoint_url()) .await? .run()?; @@ -1367,9 +1376,9 @@ mod tests { let end_num = 110; - let client = BlockRangeScanner::new() + let client = BlockRangeScanner::::new() .max_block_range(30) - .connect_ws::(anvil.ws_endpoint_url()) + .connect_ws(anvil.ws_endpoint_url()) .await? .run()?; @@ -1402,9 +1411,9 @@ mod tests { let end_num = 120; - let client = BlockRangeScanner::new() + let client = BlockRangeScanner::::new() .max_block_range(30) - .connect_ws::(anvil.ws_endpoint_url()) + .connect_ws(anvil.ws_endpoint_url()) .await? .run()?; @@ -1437,9 +1446,9 @@ mod tests { provider.anvil_mine(Option::Some(100), Option::None).await?; - let client = BlockRangeScanner::new() + let client = BlockRangeScanner::::new() .max_block_range(5) - .connect_ws::(anvil.ws_endpoint_url()) + .connect_ws(anvil.ws_endpoint_url()) .await? 
.run()?; @@ -1468,9 +1477,9 @@ mod tests { assert_next!(stream, None); // range where blocks per epoch is larger than the number of blocks on chain - let client = BlockRangeScanner::new() + let client = BlockRangeScanner::::new() .max_block_range(200) - .connect_ws::(anvil.ws_endpoint_url()) + .connect_ws(anvil.ws_endpoint_url()) .await? .run()?; @@ -1492,9 +1501,9 @@ mod tests { let provider = ProviderBuilder::new().connect(anvil.endpoint().as_str()).await?; provider.anvil_mine(Option::Some(11), Option::None).await?; - let client = BlockRangeScanner::new() + let client = BlockRangeScanner::::new() .max_block_range(5) - .connect_ws::(anvil.ws_endpoint_url()) + .connect_ws(anvil.ws_endpoint_url()) .await? .run()?; @@ -1678,9 +1687,9 @@ mod tests { provider.anvil_mine(Option::Some(150), Option::None).await?; - let client = BlockRangeScanner::new() + let client = BlockRangeScanner::::new() .max_block_range(100) - .connect_ws::(anvil.ws_endpoint_url()) + .connect_ws(anvil.ws_endpoint_url()) .await? .run()?; @@ -1702,9 +1711,9 @@ mod tests { provider.anvil_mine(Option::Some(15), Option::None).await?; - let client = BlockRangeScanner::new() + let client = BlockRangeScanner::::new() .max_block_range(5) - .connect_ws::(anvil.ws_endpoint_url()) + .connect_ws(anvil.ws_endpoint_url()) .await? .run()?; @@ -1727,9 +1736,9 @@ mod tests { provider.anvil_mine(Option::Some(15), Option::None).await?; - let client = BlockRangeScanner::new() + let client = BlockRangeScanner::::new() .max_block_range(4) - .connect_ws::(anvil.ws_endpoint_url()) + .connect_ws(anvil.ws_endpoint_url()) .await? .run()?; @@ -1752,9 +1761,9 @@ mod tests { provider.anvil_mine(Option::Some(15), Option::None).await?; - let client = BlockRangeScanner::new() + let client = BlockRangeScanner::::new() .max_block_range(5) - .connect_ws::(anvil.ws_endpoint_url()) + .connect_ws(anvil.ws_endpoint_url()) .await? .run()?; @@ -1774,9 +1783,9 @@ mod tests { provider.anvil_mine(Option::Some(15), Option::None).await?; - let client = BlockRangeScanner::new() + let client = BlockRangeScanner::::new() .max_block_range(1) - .connect_ws::(anvil.ws_endpoint_url()) + .connect_ws(anvil.ws_endpoint_url()) .await? .run()?; @@ -1800,9 +1809,9 @@ mod tests { // Mine 20 blocks, so the total number of blocks is 21 (including 0th block) provider.anvil_mine(Option::Some(20), Option::None).await?; - let client = BlockRangeScanner::new() + let client = BlockRangeScanner::::new() .max_block_range(7) - .connect_ws::(anvil.ws_endpoint_url()) + .connect_ws(anvil.ws_endpoint_url()) .await? .run()?; @@ -1826,9 +1835,9 @@ mod tests { // Ensure blocks at 3 and 15 exist provider.anvil_mine(Option::Some(16), Option::None).await?; - let client = BlockRangeScanner::new() + let client = BlockRangeScanner::::new() .max_block_range(5) - .connect_ws::(anvil.ws_endpoint_url()) + .connect_ws(anvil.ws_endpoint_url()) .await? .run()?; @@ -1854,9 +1863,9 @@ mod tests { let anvil = Anvil::new().try_spawn()?; // Do not mine up to 999 so start won't exist - let client = BlockRangeScanner::new() + let client = BlockRangeScanner::::new() .max_block_range(5) - .connect_ws::(anvil.ws_endpoint_url()) + .connect_ws(anvil.ws_endpoint_url()) .await? 
.run()?; diff --git a/src/event_scanner/modes/sync.rs b/src/event_scanner/modes/sync.rs index acbd197f..d9d245a5 100644 --- a/src/event_scanner/modes/sync.rs +++ b/src/event_scanner/modes/sync.rs @@ -22,19 +22,19 @@ use crate::{ }, }; -pub struct SyncScannerBuilder { - block_range_scanner: BlockRangeScanner, +pub struct SyncScannerBuilder { + block_range_scanner: BlockRangeScanner, from_block: BlockNumberOrTag, block_confirmations: u64, } pub struct SyncEventScanner { - config: SyncScannerBuilder, + config: SyncScannerBuilder, block_range_scanner: ConnectedBlockRangeScanner, listeners: Vec, } -impl SyncScannerBuilder { +impl SyncScannerBuilder { #[must_use] pub(crate) fn new() -> Self { Self { @@ -69,8 +69,8 @@ impl SyncScannerBuilder { /// # Errors /// /// Returns an error if the connection fails - pub async fn connect_ws(self, ws_url: Url) -> TransportResult> { - let block_range_scanner = self.block_range_scanner.connect_ws::(ws_url).await?; + pub async fn connect_ws(self, ws_url: Url) -> TransportResult> { + let block_range_scanner = self.block_range_scanner.connect_ws(ws_url).await?; Ok(SyncEventScanner { config: self, block_range_scanner, listeners: Vec::new() }) } From a0d3087d955949ba0c43997e9c8779c526a95498 Mon Sep 17 00:00:00 2001 From: Leo Date: Mon, 27 Oct 2025 22:48:05 +0900 Subject: [PATCH 034/122] feat: update to add fallback provider to main scanners --- examples/historical_scanning/main.rs | 2 +- examples/latest_events_scanning/main.rs | 2 +- examples/live_scanning/main.rs | 2 +- examples/sync_scanning/main.rs | 2 +- src/block_range_scanner.rs | 13 +++++-- src/event_scanner/modes/historic.rs | 49 ++++++++++++++----------- src/event_scanner/modes/latest.rs | 49 ++++++++++++++----------- src/event_scanner/modes/live.rs | 43 ++++++++++++---------- src/event_scanner/modes/mod.rs | 10 +++-- src/event_scanner/modes/sync.rs | 38 +++++++++++-------- tests/latest_events/basic.rs | 16 ++++---- 11 files changed, 129 insertions(+), 97 deletions(-) diff --git a/examples/historical_scanning/main.rs b/examples/historical_scanning/main.rs index 7f820bb4..4efe6510 100644 --- a/examples/historical_scanning/main.rs +++ b/examples/historical_scanning/main.rs @@ -55,7 +55,7 @@ async fn main() -> anyhow::Result<()> { let _ = counter_contract.increase().send().await?.get_receipt().await?; let mut scanner = - EventScanner::historic().connect_ws::(anvil.ws_endpoint_url()).await?; + EventScanner::historic::().connect_ws(anvil.ws_endpoint_url()).await?; let mut stream = scanner.subscribe(increase_filter); diff --git a/examples/latest_events_scanning/main.rs b/examples/latest_events_scanning/main.rs index 6631d17e..69a3fa53 100644 --- a/examples/latest_events_scanning/main.rs +++ b/examples/latest_events_scanning/main.rs @@ -49,7 +49,7 @@ async fn main() -> anyhow::Result<()> { .event(Counter::CountIncreased::SIGNATURE); let mut scanner = - EventScanner::latest().count(5).connect_ws::(anvil.ws_endpoint_url()).await?; + EventScanner::latest::().count(5).connect_ws(anvil.ws_endpoint_url()).await?; let mut stream = scanner.subscribe(increase_filter); diff --git a/examples/live_scanning/main.rs b/examples/live_scanning/main.rs index 7553ba22..c9ba3623 100644 --- a/examples/live_scanning/main.rs +++ b/examples/live_scanning/main.rs @@ -52,7 +52,7 @@ async fn main() -> anyhow::Result<()> { .contract_address(*contract_address) .event(Counter::CountIncreased::SIGNATURE); - let mut scanner = EventScanner::live().connect_ws::(anvil.ws_endpoint_url()).await?; + let mut scanner = 
EventScanner::live::().connect_ws(anvil.ws_endpoint_url()).await?; let mut stream = scanner.subscribe(increase_filter); diff --git a/examples/sync_scanning/main.rs b/examples/sync_scanning/main.rs index a7c31699..57c71444 100644 --- a/examples/sync_scanning/main.rs +++ b/examples/sync_scanning/main.rs @@ -57,7 +57,7 @@ async fn main() -> anyhow::Result<()> { info!("Historical event {} created", i + 1); } - let mut scanner = EventScanner::sync().connect_ws::(anvil.ws_endpoint_url()).await?; + let mut scanner = EventScanner::sync::().connect_ws(anvil.ws_endpoint_url()).await?; let mut stream = scanner.subscribe(increase_filter); diff --git a/src/block_range_scanner.rs b/src/block_range_scanner.rs index 62c8ac08..61dc17a2 100644 --- a/src/block_range_scanner.rs +++ b/src/block_range_scanner.rs @@ -121,7 +121,7 @@ pub struct BlockRangeScanner { pub max_timeout: Duration, pub max_retries: usize, pub retry_interval: Duration, - pub providers: Vec>, + pub fallback_providers: Vec>, } impl Default for BlockRangeScanner { @@ -138,7 +138,7 @@ impl BlockRangeScanner { max_timeout: DEFAULT_MAX_TIMEOUT, max_retries: DEFAULT_MAX_RETRIES, retry_interval: DEFAULT_RETRY_INTERVAL, - providers: Vec::new(), + fallback_providers: Vec::new(), } } @@ -166,9 +166,15 @@ impl BlockRangeScanner { self } + /// Adds a fallback provider to the block range scanner + /// + /// # Errors + /// + /// Will panic if the provider does not implement pubsub #[must_use] pub fn fallback_provider(mut self, provider: RootProvider) -> Self { - self.providers.push(provider); + provider.client().expect_pubsub_frontend(); + self.fallback_providers.push(provider); self } @@ -188,7 +194,6 @@ impl BlockRangeScanner { /// # Errors /// /// Returns an error if the connection fails - #[must_use] pub async fn connect_ipc( self, ipc_path: String, diff --git a/src/event_scanner/modes/historic.rs b/src/event_scanner/modes/historic.rs index 02f08383..84de6846 100644 --- a/src/event_scanner/modes/historic.rs +++ b/src/event_scanner/modes/historic.rs @@ -19,19 +19,19 @@ use crate::{ }, }; -pub struct HistoricScannerBuilder { - block_range_scanner: BlockRangeScanner, +pub struct HistoricScannerBuilder { + block_range_scanner: BlockRangeScanner, from_block: BlockNumberOrTag, to_block: BlockNumberOrTag, } pub struct HistoricEventScanner { - config: HistoricScannerBuilder, + config: HistoricScannerBuilder, block_range_scanner: ConnectedBlockRangeScanner, listeners: Vec, } -impl HistoricScannerBuilder { +impl HistoricScannerBuilder { #[must_use] pub(crate) fn new() -> Self { Self { @@ -59,6 +59,17 @@ impl HistoricScannerBuilder { self } + /// Adds a fallback provider (can add multiple) + /// + /// # Errors + /// + /// Will panic if the provider does not implement pubsub + #[must_use] + pub fn fallback_provider(mut self, provider: RootProvider) -> Self { + self.block_range_scanner.fallback_providers.push(provider); + self + } + /// Connects to the provider via WebSocket. /// /// Final builder method: consumes the builder and returns the built [`HistoricEventScanner`]. 
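// [editor's note] Hedged end-to-end sketch of the extended historic builder
// surface (endpoint, filter, and provider names are illustrative):
//
//     let mut scanner = EventScanner::historic::<Ethereum>()
//         .from_block(100)
//         .fallback_provider(backup_ws_provider) // panics if not pubsub
//         .connect_ws(primary_ws_url)
//         .await?;
//     let mut stream = scanner.subscribe(increase_filter);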
@@ -66,11 +77,8 @@ impl HistoricScannerBuilder { /// # Errors /// /// Returns an error if the connection fails - pub async fn connect_ws( - self, - ws_url: Url, - ) -> TransportResult> { - let block_range_scanner = self.block_range_scanner.connect_ws::(ws_url).await?; + pub async fn connect_ws(self, ws_url: Url) -> TransportResult> { + let block_range_scanner = self.block_range_scanner.clone().connect_ws(ws_url).await?; Ok(HistoricEventScanner { config: self, block_range_scanner, listeners: Vec::new() }) } @@ -81,11 +89,8 @@ impl HistoricScannerBuilder { /// # Errors /// /// Returns an error if the connection fails - pub async fn connect_ipc( - self, - ipc_path: String, - ) -> TransportResult> { - let block_range_scanner = self.block_range_scanner.connect_ipc::(ipc_path).await?; + pub async fn connect_ipc(self, ipc_path: String) -> TransportResult> { + let block_range_scanner = self.block_range_scanner.clone().connect_ipc(ipc_path).await?; Ok(HistoricEventScanner { config: self, block_range_scanner, listeners: Vec::new() }) } @@ -97,8 +102,8 @@ impl HistoricScannerBuilder { /// /// Returns an error if the connection fails #[must_use] - pub fn connect(self, provider: RootProvider) -> HistoricEventScanner { - let block_range_scanner = self.block_range_scanner.connect::(provider); + pub fn connect(self, provider: RootProvider) -> HistoricEventScanner { + let block_range_scanner = self.block_range_scanner.clone().connect(provider); HistoricEventScanner { config: self, block_range_scanner, listeners: Vec::new() } } } @@ -140,7 +145,7 @@ mod tests { #[test] fn test_historic_scanner_config_defaults() { - let config = HistoricScannerBuilder::new(); + let config: HistoricScannerBuilder = HistoricScannerBuilder::new(); assert!(matches!(config.from_block, BlockNumberOrTag::Earliest)); assert!(matches!(config.to_block, BlockNumberOrTag::Latest)); @@ -148,7 +153,7 @@ mod tests { #[test] fn test_historic_scanner_builder_pattern() { - let config = + let config: HistoricScannerBuilder = HistoricScannerBuilder::new().to_block(200).max_block_range(50).from_block(100); assert!(matches!(config.from_block, BlockNumberOrTag::Number(100))); @@ -158,7 +163,7 @@ mod tests { #[test] fn test_historic_scanner_builder_with_different_block_types() { - let config = HistoricScannerBuilder::new() + let config: HistoricScannerBuilder = HistoricScannerBuilder::new() .from_block(BlockNumberOrTag::Earliest) .to_block(BlockNumberOrTag::Latest); @@ -168,7 +173,7 @@ mod tests { #[test] fn test_historic_scanner_builder_last_call_wins() { - let config = HistoricScannerBuilder::new() + let config: HistoricScannerBuilder = HistoricScannerBuilder::new() .max_block_range(25) .max_block_range(55) .max_block_range(105) @@ -185,7 +190,7 @@ mod tests { #[test] fn test_historic_event_stream_listeners_vector_updates() { let provider = RootProvider::::new(RpcClient::mocked(Asserter::new())); - let mut scanner = HistoricScannerBuilder::new().connect::(provider); + let mut scanner = HistoricScannerBuilder::new().connect(provider); assert_eq!(scanner.listeners.len(), 0); let _stream1 = scanner.subscribe(EventFilter::new()); assert_eq!(scanner.listeners.len(), 1); @@ -197,7 +202,7 @@ mod tests { #[test] fn test_historic_event_stream_channel_capacity() { let provider = RootProvider::::new(RpcClient::mocked(Asserter::new())); - let mut scanner = HistoricScannerBuilder::new().connect::(provider); + let mut scanner = HistoricScannerBuilder::new().connect(provider); let _stream = scanner.subscribe(EventFilter::new()); let sender = 
&scanner.listeners[0].sender; assert_eq!(sender.capacity(), MAX_BUFFERED_MESSAGES); diff --git a/src/event_scanner/modes/latest.rs b/src/event_scanner/modes/latest.rs index 2af3e0ae..ebcad5b7 100644 --- a/src/event_scanner/modes/latest.rs +++ b/src/event_scanner/modes/latest.rs @@ -22,8 +22,8 @@ use crate::{ }, }; -pub struct LatestScannerBuilder { - block_range_scanner: BlockRangeScanner, +pub struct LatestScannerBuilder { + block_range_scanner: BlockRangeScanner, count: usize, from_block: BlockNumberOrTag, to_block: BlockNumberOrTag, @@ -31,12 +31,12 @@ pub struct LatestScannerBuilder { } pub struct LatestEventScanner { - config: LatestScannerBuilder, + config: LatestScannerBuilder, block_range_scanner: ConnectedBlockRangeScanner, listeners: Vec, } -impl LatestScannerBuilder { +impl LatestScannerBuilder { #[must_use] pub(crate) fn new() -> Self { Self { @@ -78,6 +78,17 @@ impl LatestScannerBuilder { self } + /// Adds a fallback provider (can add multiple) + /// + /// # Errors + /// + /// Will panic if the provider does not implement pubsub + #[must_use] + pub fn fallback_provider(mut self, provider: RootProvider) -> Self { + self.block_range_scanner.fallback_providers.push(provider); + self + } + /// Connects to the provider via WebSocket. /// /// Final builder method: consumes the builder and returns the built [`LatestEventScanner`]. @@ -85,11 +96,8 @@ impl LatestScannerBuilder { /// # Errors /// /// Returns an error if the connection fails - pub async fn connect_ws( - self, - ws_url: Url, - ) -> TransportResult> { - let block_range_scanner = self.block_range_scanner.connect_ws::(ws_url).await?; + pub async fn connect_ws(self, ws_url: Url) -> TransportResult> { + let block_range_scanner = self.block_range_scanner.clone().connect_ws(ws_url).await?; Ok(LatestEventScanner { config: self, block_range_scanner, listeners: Vec::new() }) } @@ -100,11 +108,8 @@ impl LatestScannerBuilder { /// # Errors /// /// Returns an error if the connection fails - pub async fn connect_ipc( - self, - ipc_path: String, - ) -> TransportResult> { - let block_range_scanner = self.block_range_scanner.connect_ipc::(ipc_path).await?; + pub async fn connect_ipc(self, ipc_path: String) -> TransportResult> { + let block_range_scanner = self.block_range_scanner.clone().connect_ipc(ipc_path).await?; Ok(LatestEventScanner { config: self, block_range_scanner, listeners: Vec::new() }) } @@ -116,8 +121,8 @@ impl LatestScannerBuilder { /// /// Returns an error if the connection fails #[must_use] - pub fn connect(self, provider: RootProvider) -> LatestEventScanner { - let block_range_scanner = self.block_range_scanner.connect::(provider); + pub fn connect(self, provider: RootProvider) -> LatestEventScanner { + let block_range_scanner = self.block_range_scanner.clone().connect(provider); LatestEventScanner { config: self, block_range_scanner, listeners: Vec::new() } } } @@ -166,7 +171,7 @@ mod tests { #[test] fn test_latest_scanner_config_defaults() { - let config = LatestScannerBuilder::new(); + let config: LatestScannerBuilder = LatestScannerBuilder::new(); assert_eq!(config.count, 1); assert!(matches!(config.from_block, BlockNumberOrTag::Latest)); @@ -176,7 +181,7 @@ mod tests { #[test] fn test_latest_scanner_builder_pattern() { - let config = LatestScannerBuilder::new() + let config: LatestScannerBuilder = LatestScannerBuilder::new() .max_block_range(25) .block_confirmations(5) .count(3) @@ -192,7 +197,7 @@ mod tests { #[test] fn test_latest_scanner_builder_with_different_block_types() { - let config = 
LatestScannerBuilder::new() + let config: LatestScannerBuilder = LatestScannerBuilder::new() .from_block(BlockNumberOrTag::Earliest) .to_block(BlockNumberOrTag::Latest) .count(10) @@ -206,7 +211,7 @@ mod tests { #[test] fn test_latest_scanner_builder_last_call_wins() { - let config = LatestScannerBuilder::new() + let config: LatestScannerBuilder = LatestScannerBuilder::new() .count(1) .count(2) .count(3) @@ -229,7 +234,7 @@ mod tests { #[test] fn test_latest_event_stream_listeners_vector_updates() { let provider = RootProvider::::new(RpcClient::mocked(Asserter::new())); - let mut scanner = LatestScannerBuilder::new().connect::(provider); + let mut scanner = LatestScannerBuilder::new().connect(provider); assert_eq!(scanner.listeners.len(), 0); let _stream1 = scanner.subscribe(EventFilter::new()); assert_eq!(scanner.listeners.len(), 1); @@ -241,7 +246,7 @@ mod tests { #[test] fn test_latest_event_stream_channel_capacity() { let provider = RootProvider::::new(RpcClient::mocked(Asserter::new())); - let mut scanner = LatestScannerBuilder::new().connect::(provider); + let mut scanner = LatestScannerBuilder::new().connect(provider); let _stream = scanner.subscribe(EventFilter::new()); let sender = &scanner.listeners[0].sender; assert_eq!(sender.capacity(), MAX_BUFFERED_MESSAGES); diff --git a/src/event_scanner/modes/live.rs b/src/event_scanner/modes/live.rs index 90fd7466..ae1d14a6 100644 --- a/src/event_scanner/modes/live.rs +++ b/src/event_scanner/modes/live.rs @@ -21,18 +21,18 @@ use crate::{ }, }; -pub struct LiveScannerBuilder { - block_range_scanner: BlockRangeScanner, +pub struct LiveScannerBuilder { + block_range_scanner: BlockRangeScanner, block_confirmations: u64, } pub struct LiveEventScanner { - config: LiveScannerBuilder, + config: LiveScannerBuilder, block_range_scanner: ConnectedBlockRangeScanner, listeners: Vec, } -impl LiveScannerBuilder { +impl LiveScannerBuilder { #[must_use] pub(crate) fn new() -> Self { Self { @@ -53,6 +53,12 @@ impl LiveScannerBuilder { self } + #[must_use] + pub fn fallback_provider(mut self, provider: RootProvider) -> Self { + self.block_range_scanner.fallback_providers.push(provider); + self + } + /// Connects to the provider via WebSocket. /// /// Final builder method: consumes the builder and returns the built [`LiveEventScanner`]. 
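// [editor's note] The connect methods below clone `self.block_range_scanner`
// because the whole builder is then moved into the returned scanner as
// `config: self`; the inner builder is cheap to clone (plain config values
// plus Arc-backed providers), so the clone keeps both copies alive without
// restructuring the builder.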
@@ -60,8 +66,8 @@ impl LiveScannerBuilder { /// # Errors /// /// Returns an error if the connection fails - pub async fn connect_ws(self, ws_url: Url) -> TransportResult> { - let block_range_scanner = self.block_range_scanner.connect_ws::(ws_url).await?; + pub async fn connect_ws(self, ws_url: Url) -> TransportResult> { + let block_range_scanner = self.block_range_scanner.clone().connect_ws(ws_url).await?; Ok(LiveEventScanner { config: self, block_range_scanner, listeners: Vec::new() }) } @@ -72,11 +78,8 @@ impl LiveScannerBuilder { /// # Errors /// /// Returns an error if the connection fails - pub async fn connect_ipc( - self, - ipc_path: String, - ) -> TransportResult> { - let block_range_scanner = self.block_range_scanner.connect_ipc::(ipc_path).await?; + pub async fn connect_ipc(self, ipc_path: String) -> TransportResult> { + let block_range_scanner = self.block_range_scanner.clone().connect_ipc(ipc_path).await?; Ok(LiveEventScanner { config: self, block_range_scanner, listeners: Vec::new() }) } @@ -88,8 +91,8 @@ impl LiveScannerBuilder { /// /// Returns an error if the connection fails #[must_use] - pub fn connect(self, provider: RootProvider) -> LiveEventScanner { - let block_range_scanner = self.block_range_scanner.connect::(provider); + pub fn connect(self, provider: RootProvider) -> LiveEventScanner { + let block_range_scanner = self.block_range_scanner.clone().connect(provider); LiveEventScanner { config: self, block_range_scanner, listeners: Vec::new() } } } @@ -136,14 +139,15 @@ mod tests { #[test] fn test_live_scanner_config_defaults() { - let config = LiveScannerBuilder::new(); + let config: LiveScannerBuilder = LiveScannerBuilder::new(); assert_eq!(config.block_confirmations, DEFAULT_BLOCK_CONFIRMATIONS); } #[test] fn test_live_scanner_builder_pattern() { - let config = LiveScannerBuilder::new().max_block_range(25).block_confirmations(5); + let config: LiveScannerBuilder = + LiveScannerBuilder::new().max_block_range(25).block_confirmations(5); assert_eq!(config.block_range_scanner.max_block_range, 25); assert_eq!(config.block_confirmations, 5); @@ -151,7 +155,8 @@ mod tests { #[test] fn test_live_scanner_builder_with_zero_confirmations() { - let config = LiveScannerBuilder::new().block_confirmations(0).max_block_range(100); + let config: LiveScannerBuilder = + LiveScannerBuilder::new().block_confirmations(0).max_block_range(100); assert_eq!(config.block_confirmations, 0); assert_eq!(config.block_range_scanner.max_block_range, 100); @@ -159,7 +164,7 @@ mod tests { #[test] fn test_live_scanner_builder_last_call_wins() { - let config = LiveScannerBuilder::new() + let config: LiveScannerBuilder = LiveScannerBuilder::new() .max_block_range(25) .max_block_range(55) .max_block_range(105) @@ -174,7 +179,7 @@ mod tests { #[test] fn test_live_event_stream_listeners_vector_updates() { let provider = RootProvider::::new(RpcClient::mocked(Asserter::new())); - let mut scanner = LiveScannerBuilder::new().connect::(provider); + let mut scanner = LiveScannerBuilder::new().connect(provider); assert_eq!(scanner.listeners.len(), 0); let _stream1 = scanner.subscribe(EventFilter::new()); assert_eq!(scanner.listeners.len(), 1); @@ -186,7 +191,7 @@ mod tests { #[test] fn test_live_event_stream_channel_capacity() { let provider = RootProvider::::new(RpcClient::mocked(Asserter::new())); - let mut scanner = LiveScannerBuilder::new().connect::(provider); + let mut scanner = LiveScannerBuilder::new().connect(provider); let _stream = scanner.subscribe(EventFilter::new()); let sender = 
&scanner.listeners[0].sender; assert_eq!(sender.capacity(), MAX_BUFFERED_MESSAGES); diff --git a/src/event_scanner/modes/mod.rs b/src/event_scanner/modes/mod.rs index 372d8d2d..b0442f0c 100644 --- a/src/event_scanner/modes/mod.rs +++ b/src/event_scanner/modes/mod.rs @@ -4,6 +4,8 @@ mod latest; mod live; mod sync; +use alloy::network::Network; + pub use historic::{HistoricEventScanner, HistoricScannerBuilder}; pub use latest::{LatestEventScanner, LatestScannerBuilder}; pub use live::{LiveEventScanner, LiveScannerBuilder}; @@ -13,22 +15,22 @@ pub struct EventScanner; impl EventScanner { #[must_use] - pub fn historic() -> HistoricScannerBuilder { + pub fn historic() -> HistoricScannerBuilder { HistoricScannerBuilder::new() } #[must_use] - pub fn live() -> LiveScannerBuilder { + pub fn live() -> LiveScannerBuilder { LiveScannerBuilder::new() } #[must_use] - pub fn sync() -> SyncScannerBuilder { + pub fn sync() -> SyncScannerBuilder { SyncScannerBuilder::new() } #[must_use] - pub fn latest() -> LatestScannerBuilder { + pub fn latest() -> LatestScannerBuilder { LatestScannerBuilder::new() } } diff --git a/src/event_scanner/modes/sync.rs b/src/event_scanner/modes/sync.rs index d9d245a5..d305603b 100644 --- a/src/event_scanner/modes/sync.rs +++ b/src/event_scanner/modes/sync.rs @@ -62,6 +62,17 @@ impl SyncScannerBuilder { self } + /// Adds a fallback provider (can add multiple) + /// + /// # Errors + /// + /// Will panic if the provider does not implement pubsub + #[must_use] + pub fn fallback_provider(mut self, provider: RootProvider) -> Self { + self.block_range_scanner.fallback_providers.push(provider); + self + } + /// Connects to the provider via WebSocket. /// /// Final builder method: consumes the builder and returns the built [`SyncEventScanner`]. @@ -70,7 +81,7 @@ impl SyncScannerBuilder { /// /// Returns an error if the connection fails pub async fn connect_ws(self, ws_url: Url) -> TransportResult> { - let block_range_scanner = self.block_range_scanner.connect_ws(ws_url).await?; + let block_range_scanner = self.block_range_scanner.clone().connect_ws(ws_url).await?; Ok(SyncEventScanner { config: self, block_range_scanner, listeners: Vec::new() }) } @@ -81,11 +92,8 @@ impl SyncScannerBuilder { /// # Errors /// /// Returns an error if the connection fails - pub async fn connect_ipc( - self, - ipc_path: String, - ) -> TransportResult> { - let block_range_scanner = self.block_range_scanner.connect_ipc::(ipc_path).await?; + pub async fn connect_ipc(self, ipc_path: String) -> TransportResult> { + let block_range_scanner = self.block_range_scanner.clone().connect_ipc(ipc_path).await?; Ok(SyncEventScanner { config: self, block_range_scanner, listeners: Vec::new() }) } @@ -97,8 +105,8 @@ impl SyncScannerBuilder { /// /// Returns an error if the connection fails #[must_use] - pub fn connect(self, provider: RootProvider) -> SyncEventScanner { - let block_range_scanner = self.block_range_scanner.connect::(provider); + pub fn connect(self, provider: RootProvider) -> SyncEventScanner { + let block_range_scanner = self.block_range_scanner.clone().connect(provider); SyncEventScanner { config: self, block_range_scanner, listeners: Vec::new() } } } @@ -146,7 +154,7 @@ mod tests { #[test] fn test_sync_scanner_config_defaults() { - let config = SyncScannerBuilder::new(); + let config: SyncScannerBuilder = SyncScannerBuilder::new(); assert!(matches!(config.from_block, BlockNumberOrTag::Earliest)); assert_eq!(config.block_confirmations, DEFAULT_BLOCK_CONFIRMATIONS); @@ -154,7 +162,7 @@ mod tests { #[test] fn 
test_sync_scanner_builder_pattern() { - let config = SyncScannerBuilder::new() + let config: SyncScannerBuilder = SyncScannerBuilder::new() .max_block_range(25) .block_confirmations(5) .from_block(BlockNumberOrTag::Number(50)); @@ -166,7 +174,7 @@ mod tests { #[test] fn test_sync_scanner_builder_with_different_block_types() { - let config = SyncScannerBuilder::new() + let config: SyncScannerBuilder = SyncScannerBuilder::new() .from_block(BlockNumberOrTag::Earliest) .block_confirmations(20) .max_block_range(100); @@ -178,7 +186,7 @@ mod tests { #[test] fn test_sync_scanner_builder_with_zero_confirmations() { - let config = + let config: SyncScannerBuilder = SyncScannerBuilder::new().from_block(0).block_confirmations(0).max_block_range(75); assert!(matches!(config.from_block, BlockNumberOrTag::Number(0))); @@ -188,7 +196,7 @@ mod tests { #[test] fn test_sync_scanner_builder_last_call_wins() { - let config = SyncScannerBuilder::new() + let config: SyncScannerBuilder = SyncScannerBuilder::new() .max_block_range(25) .max_block_range(55) .max_block_range(105) @@ -205,7 +213,7 @@ mod tests { #[test] fn test_sync_event_stream_listeners_vector_updates() { let provider = RootProvider::::new(RpcClient::mocked(Asserter::new())); - let mut scanner = SyncScannerBuilder::new().connect::(provider); + let mut scanner = SyncScannerBuilder::new().connect(provider); assert_eq!(scanner.listeners.len(), 0); let _stream1 = scanner.subscribe(EventFilter::new()); assert_eq!(scanner.listeners.len(), 1); @@ -217,7 +225,7 @@ mod tests { #[test] fn test_sync_event_stream_channel_capacity() { let provider = RootProvider::::new(RpcClient::mocked(Asserter::new())); - let mut scanner = SyncScannerBuilder::new().connect::(provider); + let mut scanner = SyncScannerBuilder::new().connect(provider); let _stream = scanner.subscribe(EventFilter::new()); let sender = &scanner.listeners[0].sender; assert_eq!(sender.capacity(), MAX_BUFFERED_MESSAGES); diff --git a/tests/latest_events/basic.rs b/tests/latest_events/basic.rs index 64082f94..0e18580e 100644 --- a/tests/latest_events/basic.rs +++ b/tests/latest_events/basic.rs @@ -9,7 +9,9 @@ use alloy::{ }; use crate::common::{TestCounter, deploy_counter, setup_common, setup_latest_scanner}; -use event_scanner::{EventFilter, EventScanner, assert_next, test_utils::LogMetadata}; +use event_scanner::{ + EventFilter, EventScanner, LatestEventScanner, assert_next, test_utils::LogMetadata, +}; macro_rules! 
increase { ($contract: expr) => {{ @@ -127,11 +129,11 @@ async fn latest_scanner_respects_range_subset() -> anyhow::Result<()> { let start = BlockNumberOrTag::from(head - 3); let end = BlockNumberOrTag::from(head); - let mut scanner_with_range = EventScanner::latest() + let mut scanner_with_range: LatestEventScanner = EventScanner::latest() .count(10) .from_block(start) .to_block(end) - .connect_ws::(anvil.ws_endpoint_url()) + .connect_ws(anvil.ws_endpoint_url()) .await?; let mut stream_with_range = scanner_with_range.subscribe(default_filter); @@ -324,11 +326,11 @@ async fn latest_scanner_large_gaps_and_empty_ranges() -> anyhow::Result<()> { let start = BlockNumberOrTag::from(head - 12); let end = BlockNumberOrTag::from(head); - let mut scanner_with_range = EventScanner::latest() + let mut scanner_with_range: LatestEventScanner = EventScanner::latest() .count(5) .from_block(start) .to_block(end) - .connect_ws::(anvil.ws_endpoint_url()) + .connect_ws(anvil.ws_endpoint_url()) .await?; let mut stream_with_range = scanner_with_range.subscribe(default_filter); @@ -358,11 +360,11 @@ async fn latest_scanner_boundary_range_single_block() -> anyhow::Result<()> { .unwrap(); let end = start; - let mut scanner_with_range = EventScanner::latest() + let mut scanner_with_range: LatestEventScanner = EventScanner::latest() .count(5) .from_block(start) .to_block(end) - .connect_ws::(anvil.ws_endpoint_url()) + .connect_ws(anvil.ws_endpoint_url()) .await?; let mut stream_with_range = scanner_with_range.subscribe(default_filter); From 919d352ffda661772c511d9846d4067bb6d16cfd Mon Sep 17 00:00:00 2001 From: Leo Date: Mon, 27 Oct 2025 22:58:44 +0900 Subject: [PATCH 035/122] fix: doctest --- src/block_range_scanner.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/block_range_scanner.rs b/src/block_range_scanner.rs index 61dc17a2..4ad96b2a 100644 --- a/src/block_range_scanner.rs +++ b/src/block_range_scanner.rs @@ -22,8 +22,8 @@ //! tracing_subscriber::fmt::init(); //! //! // Configuration -//! let block_range_scanner = BlockRangeScanner::new() -//! .connect_ws::(Url::parse("ws://localhost:8546").unwrap()) +//! let block_range_scanner = BlockRangeScanner::::new() +//! .connect_ws(Url::parse("ws://localhost:8546").unwrap()) //! .await?; //! //! 
// Create client to send subscribe command to block scanner From c6693e7d667f862dbb4144c5fc483031be4a94ad Mon Sep 17 00:00:00 2001 From: Leo Date: Mon, 27 Oct 2025 22:58:55 +0900 Subject: [PATCH 036/122] ref: better logging --- src/safe_provider.rs | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/src/safe_provider.rs b/src/safe_provider.rs index be5ba170..d4930f5c 100644 --- a/src/safe_provider.rs +++ b/src/safe_provider.rs @@ -214,25 +214,26 @@ impl SafeProvider { return result; } - info!( - "Primary provider failed, trying {} fallback provider(s)", - self.fallback_providers.len() - ); + info!("Primary provider failed, trying fallback provider(s)"); // Try each fallback provider for (idx, fallback_provider) in self.fallback_providers.iter().enumerate() { - info!("Attempting fallback provider {}", idx + 1); + info!( + "Attempting fallback provider {} out of {}", + idx + 1, + self.fallback_providers.len() + ); let fallback_result = self.try_provider_with_timeout(fallback_provider, &operation).await; match fallback_result { Ok(value) => { - info!("Fallback provider {} succeeded", idx + 1); + info!(provider_num = idx + 1, "Fallback provider succeeded"); return Ok(value); } Err(e) => { - error!("Fallback provider {} failed with error: {}", idx + 1, e); + error!(provider_num = idx + 1, err = %e, "Fallback provider failed with error"); } } } From 6d2b9436eacb4d1ec3e8dc15d400634e0b082406 Mon Sep 17 00:00:00 2001 From: Leo Date: Mon, 27 Oct 2025 22:59:11 +0900 Subject: [PATCH 037/122] fix: test uses anvil (mock provider does not support pubsub) --- src/event_scanner/modes/historic.rs | 21 ++++++++++++--------- src/event_scanner/modes/latest.rs | 21 ++++++++++++--------- src/event_scanner/modes/live.rs | 21 ++++++++++++--------- src/event_scanner/modes/sync.rs | 21 ++++++++++++--------- 4 files changed, 48 insertions(+), 36 deletions(-) diff --git a/src/event_scanner/modes/historic.rs b/src/event_scanner/modes/historic.rs index 84de6846..63cc5545 100644 --- a/src/event_scanner/modes/historic.rs +++ b/src/event_scanner/modes/historic.rs @@ -141,7 +141,8 @@ impl HistoricEventScanner { #[cfg(test)] mod tests { use super::*; - use alloy::{network::Ethereum, rpc::client::RpcClient, transports::mock::Asserter}; + use alloy::network::Ethereum; + use alloy_node_bindings::Anvil; #[test] fn test_historic_scanner_config_defaults() { @@ -187,24 +188,26 @@ mod tests { assert!(matches!(config.to_block, BlockNumberOrTag::Number(200))); } - #[test] - fn test_historic_event_stream_listeners_vector_updates() { - let provider = RootProvider::::new(RpcClient::mocked(Asserter::new())); - let mut scanner = HistoricScannerBuilder::new().connect(provider); + #[tokio::test] + async fn test_historic_event_stream_listeners_vector_updates() -> anyhow::Result<()> { + let anvil = Anvil::new().try_spawn()?; + let mut scanner = HistoricScannerBuilder::::new().connect_ws(anvil.ws_endpoint_url()).await?; assert_eq!(scanner.listeners.len(), 0); let _stream1 = scanner.subscribe(EventFilter::new()); assert_eq!(scanner.listeners.len(), 1); let _stream2 = scanner.subscribe(EventFilter::new()); let _stream3 = scanner.subscribe(EventFilter::new()); assert_eq!(scanner.listeners.len(), 3); + Ok(()) } - #[test] - fn test_historic_event_stream_channel_capacity() { - let provider = RootProvider::::new(RpcClient::mocked(Asserter::new())); - let mut scanner = HistoricScannerBuilder::new().connect(provider); + #[tokio::test] + async fn test_historic_event_stream_channel_capacity() -> anyhow::Result<()> { + let 
anvil = Anvil::new().try_spawn()?; + let mut scanner = HistoricScannerBuilder::::new().connect_ws(anvil.ws_endpoint_url()).await?; let _stream = scanner.subscribe(EventFilter::new()); let sender = &scanner.listeners[0].sender; assert_eq!(sender.capacity(), MAX_BUFFERED_MESSAGES); + Ok(()) } } diff --git a/src/event_scanner/modes/latest.rs b/src/event_scanner/modes/latest.rs index ebcad5b7..c0e9013b 100644 --- a/src/event_scanner/modes/latest.rs +++ b/src/event_scanner/modes/latest.rs @@ -167,7 +167,8 @@ impl LatestEventScanner { #[cfg(test)] mod tests { use super::*; - use alloy::{network::Ethereum, rpc::client::RpcClient, transports::mock::Asserter}; + use alloy::network::Ethereum; + use alloy_node_bindings::Anvil; #[test] fn test_latest_scanner_config_defaults() { @@ -231,24 +232,26 @@ mod tests { assert_eq!(config.block_range_scanner.max_block_range, 60); } - #[test] - fn test_latest_event_stream_listeners_vector_updates() { - let provider = RootProvider::::new(RpcClient::mocked(Asserter::new())); - let mut scanner = LatestScannerBuilder::new().connect(provider); + #[tokio::test] + async fn test_latest_event_stream_listeners_vector_updates() -> anyhow::Result<()> { + let anvil = Anvil::new().try_spawn()?; + let mut scanner = LatestScannerBuilder::::new().connect_ws(anvil.ws_endpoint_url()).await?; assert_eq!(scanner.listeners.len(), 0); let _stream1 = scanner.subscribe(EventFilter::new()); assert_eq!(scanner.listeners.len(), 1); let _stream2 = scanner.subscribe(EventFilter::new()); let _stream3 = scanner.subscribe(EventFilter::new()); assert_eq!(scanner.listeners.len(), 3); + Ok(()) } - #[test] - fn test_latest_event_stream_channel_capacity() { - let provider = RootProvider::::new(RpcClient::mocked(Asserter::new())); - let mut scanner = LatestScannerBuilder::new().connect(provider); + #[tokio::test] + async fn test_latest_event_stream_channel_capacity() -> anyhow::Result<()> { + let anvil = Anvil::new().try_spawn()?; + let mut scanner = LatestScannerBuilder::::new().connect_ws(anvil.ws_endpoint_url()).await?; let _stream = scanner.subscribe(EventFilter::new()); let sender = &scanner.listeners[0].sender; assert_eq!(sender.capacity(), MAX_BUFFERED_MESSAGES); + Ok(()) } } diff --git a/src/event_scanner/modes/live.rs b/src/event_scanner/modes/live.rs index ae1d14a6..35aed94f 100644 --- a/src/event_scanner/modes/live.rs +++ b/src/event_scanner/modes/live.rs @@ -135,7 +135,8 @@ impl LiveEventScanner { #[cfg(test)] mod tests { use super::*; - use alloy::{network::Ethereum, rpc::client::RpcClient, transports::mock::Asserter}; + use alloy::network::Ethereum; + use alloy_node_bindings::Anvil; #[test] fn test_live_scanner_config_defaults() { @@ -176,24 +177,26 @@ mod tests { assert_eq!(config.block_confirmations, 8); } - #[test] - fn test_live_event_stream_listeners_vector_updates() { - let provider = RootProvider::::new(RpcClient::mocked(Asserter::new())); - let mut scanner = LiveScannerBuilder::new().connect(provider); + #[tokio::test] + async fn test_live_event_stream_listeners_vector_updates() -> anyhow::Result<()> { + let anvil = Anvil::new().try_spawn()?; + let mut scanner = LiveScannerBuilder::::new().connect_ws(anvil.ws_endpoint_url()).await?; assert_eq!(scanner.listeners.len(), 0); let _stream1 = scanner.subscribe(EventFilter::new()); assert_eq!(scanner.listeners.len(), 1); let _stream2 = scanner.subscribe(EventFilter::new()); let _stream3 = scanner.subscribe(EventFilter::new()); assert_eq!(scanner.listeners.len(), 3); + Ok(()) } - #[test] - fn test_live_event_stream_channel_capacity() { 
- let provider = RootProvider::::new(RpcClient::mocked(Asserter::new())); - let mut scanner = LiveScannerBuilder::new().connect(provider); + #[tokio::test] + async fn test_live_event_stream_channel_capacity() -> anyhow::Result<()> { + let anvil = Anvil::new().try_spawn()?; + let mut scanner = LiveScannerBuilder::::new().connect_ws(anvil.ws_endpoint_url()).await?; let _stream = scanner.subscribe(EventFilter::new()); let sender = &scanner.listeners[0].sender; assert_eq!(sender.capacity(), MAX_BUFFERED_MESSAGES); + Ok(()) } } diff --git a/src/event_scanner/modes/sync.rs b/src/event_scanner/modes/sync.rs index d305603b..2c0a0a01 100644 --- a/src/event_scanner/modes/sync.rs +++ b/src/event_scanner/modes/sync.rs @@ -150,7 +150,8 @@ impl SyncEventScanner { #[cfg(test)] mod tests { use super::*; - use alloy::{network::Ethereum, rpc::client::RpcClient, transports::mock::Asserter}; + use alloy::network::Ethereum; + use alloy_node_bindings::Anvil; #[test] fn test_sync_scanner_config_defaults() { @@ -210,24 +211,26 @@ mod tests { assert_eq!(config.block_confirmations, 7); } - #[test] - fn test_sync_event_stream_listeners_vector_updates() { - let provider = RootProvider::::new(RpcClient::mocked(Asserter::new())); - let mut scanner = SyncScannerBuilder::new().connect(provider); + #[tokio::test] + async fn test_sync_event_stream_listeners_vector_updates() -> anyhow::Result<()> { + let anvil = Anvil::new().try_spawn()?; + let mut scanner = SyncScannerBuilder::::new().connect_ws(anvil.ws_endpoint_url()).await?; assert_eq!(scanner.listeners.len(), 0); let _stream1 = scanner.subscribe(EventFilter::new()); assert_eq!(scanner.listeners.len(), 1); let _stream2 = scanner.subscribe(EventFilter::new()); let _stream3 = scanner.subscribe(EventFilter::new()); assert_eq!(scanner.listeners.len(), 3); + Ok(()) } - #[test] - fn test_sync_event_stream_channel_capacity() { - let provider = RootProvider::::new(RpcClient::mocked(Asserter::new())); - let mut scanner = SyncScannerBuilder::new().connect(provider); + #[tokio::test] + async fn test_sync_event_stream_channel_capacity() -> anyhow::Result<()> { + let anvil = Anvil::new().try_spawn()?; + let mut scanner = SyncScannerBuilder::::new().connect_ws(anvil.ws_endpoint_url()).await?; let _stream = scanner.subscribe(EventFilter::new()); let sender = &scanner.listeners[0].sender; assert_eq!(sender.capacity(), MAX_BUFFERED_MESSAGES); + Ok(()) } } From cd1a2d9baf04d5bec1e1a65d4adfc4fadaed5542 Mon Sep 17 00:00:00 2001 From: Leo Date: Mon, 27 Oct 2025 23:02:06 +0900 Subject: [PATCH 038/122] fix: format --- src/event_scanner/modes/historic.rs | 6 ++++-- src/event_scanner/modes/latest.rs | 6 ++++-- src/event_scanner/modes/live.rs | 6 ++++-- src/event_scanner/modes/sync.rs | 6 ++++-- 4 files changed, 16 insertions(+), 8 deletions(-) diff --git a/src/event_scanner/modes/historic.rs b/src/event_scanner/modes/historic.rs index 63cc5545..6e570947 100644 --- a/src/event_scanner/modes/historic.rs +++ b/src/event_scanner/modes/historic.rs @@ -191,7 +191,8 @@ mod tests { #[tokio::test] async fn test_historic_event_stream_listeners_vector_updates() -> anyhow::Result<()> { let anvil = Anvil::new().try_spawn()?; - let mut scanner = HistoricScannerBuilder::::new().connect_ws(anvil.ws_endpoint_url()).await?; + let mut scanner = + HistoricScannerBuilder::::new().connect_ws(anvil.ws_endpoint_url()).await?; assert_eq!(scanner.listeners.len(), 0); let _stream1 = scanner.subscribe(EventFilter::new()); assert_eq!(scanner.listeners.len(), 1); @@ -204,7 +205,8 @@ mod tests { #[tokio::test] async fn 
test_historic_event_stream_channel_capacity() -> anyhow::Result<()> { let anvil = Anvil::new().try_spawn()?; - let mut scanner = HistoricScannerBuilder::::new().connect_ws(anvil.ws_endpoint_url()).await?; + let mut scanner = + HistoricScannerBuilder::::new().connect_ws(anvil.ws_endpoint_url()).await?; let _stream = scanner.subscribe(EventFilter::new()); let sender = &scanner.listeners[0].sender; assert_eq!(sender.capacity(), MAX_BUFFERED_MESSAGES); diff --git a/src/event_scanner/modes/latest.rs b/src/event_scanner/modes/latest.rs index c0e9013b..d67962a3 100644 --- a/src/event_scanner/modes/latest.rs +++ b/src/event_scanner/modes/latest.rs @@ -235,7 +235,8 @@ mod tests { #[tokio::test] async fn test_latest_event_stream_listeners_vector_updates() -> anyhow::Result<()> { let anvil = Anvil::new().try_spawn()?; - let mut scanner = LatestScannerBuilder::::new().connect_ws(anvil.ws_endpoint_url()).await?; + let mut scanner = + LatestScannerBuilder::::new().connect_ws(anvil.ws_endpoint_url()).await?; assert_eq!(scanner.listeners.len(), 0); let _stream1 = scanner.subscribe(EventFilter::new()); assert_eq!(scanner.listeners.len(), 1); @@ -248,7 +249,8 @@ mod tests { #[tokio::test] async fn test_latest_event_stream_channel_capacity() -> anyhow::Result<()> { let anvil = Anvil::new().try_spawn()?; - let mut scanner = LatestScannerBuilder::::new().connect_ws(anvil.ws_endpoint_url()).await?; + let mut scanner = + LatestScannerBuilder::::new().connect_ws(anvil.ws_endpoint_url()).await?; let _stream = scanner.subscribe(EventFilter::new()); let sender = &scanner.listeners[0].sender; assert_eq!(sender.capacity(), MAX_BUFFERED_MESSAGES); diff --git a/src/event_scanner/modes/live.rs b/src/event_scanner/modes/live.rs index 35aed94f..b2be5161 100644 --- a/src/event_scanner/modes/live.rs +++ b/src/event_scanner/modes/live.rs @@ -180,7 +180,8 @@ mod tests { #[tokio::test] async fn test_live_event_stream_listeners_vector_updates() -> anyhow::Result<()> { let anvil = Anvil::new().try_spawn()?; - let mut scanner = LiveScannerBuilder::::new().connect_ws(anvil.ws_endpoint_url()).await?; + let mut scanner = + LiveScannerBuilder::::new().connect_ws(anvil.ws_endpoint_url()).await?; assert_eq!(scanner.listeners.len(), 0); let _stream1 = scanner.subscribe(EventFilter::new()); assert_eq!(scanner.listeners.len(), 1); @@ -193,7 +194,8 @@ mod tests { #[tokio::test] async fn test_live_event_stream_channel_capacity() -> anyhow::Result<()> { let anvil = Anvil::new().try_spawn()?; - let mut scanner = LiveScannerBuilder::::new().connect_ws(anvil.ws_endpoint_url()).await?; + let mut scanner = + LiveScannerBuilder::::new().connect_ws(anvil.ws_endpoint_url()).await?; let _stream = scanner.subscribe(EventFilter::new()); let sender = &scanner.listeners[0].sender; assert_eq!(sender.capacity(), MAX_BUFFERED_MESSAGES); diff --git a/src/event_scanner/modes/sync.rs b/src/event_scanner/modes/sync.rs index 2c0a0a01..8000d924 100644 --- a/src/event_scanner/modes/sync.rs +++ b/src/event_scanner/modes/sync.rs @@ -214,7 +214,8 @@ mod tests { #[tokio::test] async fn test_sync_event_stream_listeners_vector_updates() -> anyhow::Result<()> { let anvil = Anvil::new().try_spawn()?; - let mut scanner = SyncScannerBuilder::::new().connect_ws(anvil.ws_endpoint_url()).await?; + let mut scanner = + SyncScannerBuilder::::new().connect_ws(anvil.ws_endpoint_url()).await?; assert_eq!(scanner.listeners.len(), 0); let _stream1 = scanner.subscribe(EventFilter::new()); assert_eq!(scanner.listeners.len(), 1); @@ -227,7 +228,8 @@ mod tests { #[tokio::test] async fn 
test_sync_event_stream_channel_capacity() -> anyhow::Result<()> { let anvil = Anvil::new().try_spawn()?; - let mut scanner = SyncScannerBuilder::::new().connect_ws(anvil.ws_endpoint_url()).await?; + let mut scanner = + SyncScannerBuilder::::new().connect_ws(anvil.ws_endpoint_url()).await?; let _stream = scanner.subscribe(EventFilter::new()); let sender = &scanner.listeners[0].sender; assert_eq!(sender.capacity(), MAX_BUFFERED_MESSAGES); From 0546feb8c6b1d32a401215974341ab3d33d800b6 Mon Sep 17 00:00:00 2001 From: Leo Date: Mon, 27 Oct 2025 23:18:25 +0900 Subject: [PATCH 039/122] ref: update test --- src/safe_provider.rs | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/src/safe_provider.rs b/src/safe_provider.rs index d454c32c..d1b1d869 100644 --- a/src/safe_provider.rs +++ b/src/safe_provider.rs @@ -217,12 +217,11 @@ mod tests { let result = provider .retry_with_total_timeout(|| async { call_count.fetch_add(1, Ordering::SeqCst); - Ok(42) + Ok(call_count.load(Ordering::SeqCst)) }) .await; - assert!(matches!(result, Ok(42))); - assert_eq!(call_count.load(Ordering::SeqCst), 1); + assert!(matches!(result, Ok(1))); } #[tokio::test] @@ -237,14 +236,12 @@ mod tests { if call_count.load(Ordering::SeqCst) < 3 { Err(TransportErrorKind::custom_str("temporary error")) } else { - Ok(42) + Ok(call_count.load(Ordering::SeqCst)) } }) .await; - assert!(result.is_ok()); - assert_eq!(result.unwrap(), 42); - assert_eq!(call_count.load(Ordering::SeqCst), 3); + assert!(matches!(result, Ok(3))); } #[tokio::test] From 01a9cf117697bc480d0be598fb4c9e0ec8eaf5eb Mon Sep 17 00:00:00 2001 From: Leo Date: Tue, 28 Oct 2025 00:08:25 +0900 Subject: [PATCH 040/122] feat: add custom error for safe provider --- src/error.rs | 18 +++++++- src/event_scanner/message.rs | 9 +++- src/event_scanner/modes/common.rs | 5 +-- src/safe_provider.rs | 74 ++++++++++++++++++++----------- 4 files changed, 76 insertions(+), 30 deletions(-) diff --git a/src/error.rs b/src/error.rs index 20801b90..7eb6b887 100644 --- a/src/error.rs +++ b/src/error.rs @@ -7,7 +7,7 @@ use alloy::{ }; use thiserror::Error; -use crate::block_range_scanner::Message; +use crate::{block_range_scanner::Message, safe_provider::SafeProviderError}; #[derive(Error, Debug, Clone)] pub enum ScannerError { @@ -42,6 +42,22 @@ pub enum ScannerError { #[error("Block not found, block number: {0}")] BlockNotFound(BlockNumberOrTag), + + #[error("Operation timed out")] + Timeout, + + #[error("Retry failed after {0} tries")] + RetryFail(usize), +} + +impl From for ScannerError { + fn from(error: SafeProviderError) -> ScannerError { + match error { + SafeProviderError::RpcError(err) => ScannerError::RpcError(err), + SafeProviderError::Timeout => ScannerError::Timeout, + SafeProviderError::RetryFail(num) => ScannerError::RetryFail(num), + } + } } impl From, ScannerError>> for Message { diff --git a/src/event_scanner/message.rs b/src/event_scanner/message.rs index 5f916388..1a2480f0 100644 --- a/src/event_scanner/message.rs +++ b/src/event_scanner/message.rs @@ -1,6 +1,6 @@ use alloy::{rpc::types::Log, sol_types::SolEvent}; -use crate::{ScannerError, ScannerMessage}; +use crate::{ScannerError, ScannerMessage, safe_provider::SafeProviderError}; pub type Message = ScannerMessage, ScannerError>; @@ -10,6 +10,13 @@ impl From> for Message { } } +impl From for Message { + fn from(error: SafeProviderError) -> Message { + let scanner_error: ScannerError = error.into(); + scanner_error.into() + } +} + impl PartialEq> for Message { fn eq(&self, other: &Vec) -> bool { 
self.eq(&other.as_slice()) diff --git a/src/event_scanner/modes/common.rs b/src/event_scanner/modes/common.rs index b31c4ded..62cc0ca8 100644 --- a/src/event_scanner/modes/common.rs +++ b/src/event_scanner/modes/common.rs @@ -3,12 +3,11 @@ use std::ops::RangeInclusive; use crate::{ block_range_scanner::{MAX_BUFFERED_MESSAGES, Message as BlockRangeMessage}, event_scanner::{filter::EventFilter, listener::EventListener, message::Message}, - safe_provider::SafeProvider, + safe_provider::{SafeProvider, SafeProviderError}, }; use alloy::{ network::Network, rpc::types::{Filter, Log}, - transports::{RpcError, TransportErrorKind}, }; use tokio::sync::{ broadcast::{self, Sender, error::RecvError}, @@ -130,7 +129,7 @@ async fn get_logs( event_filter: &EventFilter, log_filter: &Filter, provider: &SafeProvider, -) -> Result, RpcError> { +) -> Result, SafeProviderError> { let log_filter = log_filter.clone().from_block(*range.start()).to_block(*range.end()); match provider.get_logs(&log_filter).await { diff --git a/src/safe_provider.rs b/src/safe_provider.rs index d1b1d869..5563986d 100644 --- a/src/safe_provider.rs +++ b/src/safe_provider.rs @@ -1,4 +1,4 @@ -use std::{future::Future, time::Duration}; +use std::{future::Future, sync::Arc, time::Duration}; use alloy::{ eips::BlockNumberOrTag, @@ -9,8 +9,25 @@ use alloy::{ transports::{RpcError, TransportErrorKind}, }; use backon::{ExponentialBuilder, Retryable}; +use thiserror::Error; use tracing::{error, info}; +#[derive(Error, Debug, Clone)] +pub enum SafeProviderError { + #[error("RPC error: {0}")] + RpcError(Arc>), + #[error("Operation timed out")] + Timeout, + #[error("Retry failed after {0} tries")] + RetryFail(usize), +} + +impl From> for SafeProviderError { + fn from(err: RpcError) -> Self { + SafeProviderError::RpcError(Arc::new(err)) + } +} + /// Safe provider wrapper with built-in retry and timeout mechanisms. /// /// This wrapper around Alloy providers automatically handles retries, @@ -70,9 +87,11 @@ impl SafeProvider { pub async fn get_block_by_number( &self, number: BlockNumberOrTag, - ) -> Result, RpcError> { + ) -> Result, SafeProviderError> { info!("eth_getBlockByNumber called"); - let operation = async || self.provider.get_block_by_number(number).await; + let operation = async || { + self.provider.get_block_by_number(number).await.map_err(SafeProviderError::from) + }; let result = self.retry_with_total_timeout(operation).await; if let Err(e) = &result { error!(error = %e, "eth_getByBlockNumber failed"); @@ -86,9 +105,10 @@ impl SafeProvider { /// /// Returns an error if RPC call fails repeatedly even /// after exhausting retries or if the call times out. 
- pub async fn get_block_number(&self) -> Result> { + pub async fn get_block_number(&self) -> Result { info!("eth_getBlockNumber called"); - let operation = || self.provider.get_block_number(); + let operation = + async || self.provider.get_block_number().await.map_err(SafeProviderError::from); let result = self.retry_with_total_timeout(operation).await; if let Err(e) = &result { error!(error = %e, "eth_getBlockNumber failed"); @@ -105,9 +125,10 @@ impl SafeProvider { pub async fn get_block_by_hash( &self, hash: alloy::primitives::BlockHash, - ) -> Result, RpcError> { + ) -> Result, SafeProviderError> { info!("eth_getBlockByHash called"); - let operation = async || self.provider.get_block_by_hash(hash).await; + let operation = + async || self.provider.get_block_by_hash(hash).await.map_err(SafeProviderError::from); let result = self.retry_with_total_timeout(operation).await; if let Err(e) = &result { error!(error = %e, "eth_getBlockByHash failed"); @@ -121,12 +142,10 @@ impl SafeProvider { /// /// Returns an error if RPC call fails repeatedly even /// after exhausting retries or if the call times out. - pub async fn get_logs( - &self, - filter: &Filter, - ) -> Result, RpcError> { + pub async fn get_logs(&self, filter: &Filter) -> Result, SafeProviderError> { info!("eth_getLogs called"); - let operation = || self.provider.get_logs(filter); + let operation = + async || self.provider.get_logs(filter).await.map_err(SafeProviderError::from); let result = self.retry_with_total_timeout(operation).await; if let Err(e) = &result { error!(error = %e, "eth_getLogs failed"); @@ -142,11 +161,14 @@ impl SafeProvider { /// after exhausting retries or if the call times out. pub async fn subscribe_blocks( &self, - ) -> Result, RpcError> { + ) -> Result, SafeProviderError> { info!("eth_subscribe called"); let provider = self.provider.clone(); - let result = - self.retry_with_total_timeout(|| async { provider.subscribe_blocks().await }).await; + let result = self + .retry_with_total_timeout(|| async { + provider.subscribe_blocks().await.map_err(SafeProviderError::from) + }) + .await; if let Err(e) = &result { error!(error = %e, "eth_subscribe failed"); } @@ -167,10 +189,10 @@ impl SafeProvider { async fn retry_with_total_timeout( &self, operation: F, - ) -> Result> + ) -> Result where F: Fn() -> Fut, - Fut: Future>>, + Fut: Future>, { let retry_strategy = ExponentialBuilder::default() .with_max_times(self.max_retries) @@ -182,8 +204,9 @@ impl SafeProvider { ) .await { - Ok(res) => res, - Err(_) => Err(TransportErrorKind::custom_str("total operation timeout exceeded")), + Ok(Ok(res)) => Ok(res), + Ok(Err(_)) => Err(SafeProviderError::RetryFail(self.max_retries + 1)), + Err(_) => Err(SafeProviderError::Timeout), } } } @@ -234,7 +257,9 @@ mod tests { .retry_with_total_timeout(|| async { call_count.fetch_add(1, Ordering::SeqCst); if call_count.load(Ordering::SeqCst) < 3 { - Err(TransportErrorKind::custom_str("temporary error")) + Err(SafeProviderError::RpcError(Arc::new(TransportErrorKind::custom_str( + "temp error", + )))) } else { Ok(call_count.load(Ordering::SeqCst)) } @@ -253,14 +278,13 @@ mod tests { let result = provider .retry_with_total_timeout(|| async { call_count.fetch_add(1, Ordering::SeqCst); - Err::>(TransportErrorKind::custom_str( - "permanent error", - )) + // permanent error + Err::(SafeProviderError::Timeout) }) .await; let err = result.unwrap_err(); - assert!(err.to_string().contains("permanent error"),); + assert!(matches!(err, SafeProviderError::RetryFail(3))); 
assert_eq!(call_count.load(Ordering::SeqCst), 3); } @@ -277,6 +301,6 @@ mod tests { .await; let err = result.unwrap_err(); - assert!(err.to_string().contains("total operation timeout exceeded"),); + assert!(matches!(err, SafeProviderError::Timeout)); } } From a82b12fa817d6f6e5012ae9e1ddfbe43373341ea Mon Sep 17 00:00:00 2001 From: Leo Date: Tue, 28 Oct 2025 00:11:02 +0900 Subject: [PATCH 041/122] ref: remove moves --- src/safe_provider.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/safe_provider.rs b/src/safe_provider.rs index 5563986d..91547e85 100644 --- a/src/safe_provider.rs +++ b/src/safe_provider.rs @@ -294,7 +294,7 @@ mod tests { let provider = test_provider(max_timeout, 10, 1); let result = provider - .retry_with_total_timeout(move || async move { + .retry_with_total_timeout(|| async { sleep(Duration::from_millis(max_timeout + 10)).await; Ok(42) }) From 426491458f4209afa70673e8f264616b924d1c04 Mon Sep 17 00:00:00 2001 From: Leo Date: Tue, 28 Oct 2025 00:23:03 +0900 Subject: [PATCH 042/122] fix: update test post merge --- src/safe_provider.rs | 32 +++++++++++++++++++------------- 1 file changed, 19 insertions(+), 13 deletions(-) diff --git a/src/safe_provider.rs b/src/safe_provider.rs index 093fbbc7..63fc42a4 100644 --- a/src/safe_provider.rs +++ b/src/safe_provider.rs @@ -335,14 +335,17 @@ mod tests { let call_count_clone = call_count.clone(); let result = provider - .retry_with_total_timeout(|| async { - call_count.fetch_add(1, Ordering::SeqCst); - if call_count.load(Ordering::SeqCst) < 3 { - Err(SafeProviderError::RpcError(Arc::new(TransportErrorKind::custom_str( - "temp error", - )))) - } else { - Ok(call_count.load(Ordering::SeqCst)) + .retry_with_total_timeout(move |_provider| { + let call_count = call_count_clone.clone(); + async move { + let count = call_count.fetch_add(1, Ordering::SeqCst) + 1; + if count < 3 { + Err(SafeProviderError::RpcError(Arc::new(TransportErrorKind::custom_str( + "temp error", + )))) + } else { + Ok(count) + } } }) .await; @@ -358,10 +361,13 @@ mod tests { let call_count_clone = call_count.clone(); let result = provider - .retry_with_total_timeout(move |_provider| async { - call_count.fetch_add(1, Ordering::SeqCst); - // permanent error - Err::(SafeProviderError::Timeout) + .retry_with_total_timeout(move |_provider| { + let call_count = call_count_clone.clone(); + async move { + call_count.fetch_add(1, Ordering::SeqCst); + // permanent error + Err::(SafeProviderError::Timeout) + } }) .await; @@ -376,7 +382,7 @@ mod tests { let provider = test_provider(max_timeout, 10, 1); let result = provider - .retry_with_total_timeout(|| async { + .retry_with_total_timeout(move |_provider| async move { sleep(Duration::from_millis(max_timeout + 10)).await; Ok(42) }) From 72b8eaf6bb833992306247ebe4a3d819b832f5ba Mon Sep 17 00:00:00 2001 From: Leo Date: Tue, 28 Oct 2025 00:26:34 +0900 Subject: [PATCH 043/122] ref: remove filter clone --- src/safe_provider.rs | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/src/safe_provider.rs b/src/safe_provider.rs index 63fc42a4..3e29ab6a 100644 --- a/src/safe_provider.rs +++ b/src/safe_provider.rs @@ -162,9 +162,8 @@ impl SafeProvider { pub async fn get_logs(&self, filter: &Filter) -> Result, SafeProviderError> { info!("eth_getLogs called"); let result = self - .retry_with_total_timeout(move |provider| { - let filter = filter.clone(); - async move { provider.get_logs(&filter).await.map_err(SafeProviderError::from) } + .retry_with_total_timeout(move |provider| async move { + 
provider.get_logs(filter).await.map_err(SafeProviderError::from) }) .await; if let Err(e) = &result { From 1ee07ca6ece43f4f9cd584c9550758c062a4a4ee Mon Sep 17 00:00:00 2001 From: Leo Date: Tue, 28 Oct 2025 17:11:20 +0900 Subject: [PATCH 044/122] ref: rename safe to robust provider --- src/block_range_scanner.rs | 26 +++++----- src/error.rs | 12 ++--- src/event_scanner/message.rs | 6 +-- src/event_scanner/modes/common.rs | 10 ++-- src/lib.rs | 2 +- src/{safe_provider.rs => robust_provider.rs} | 54 ++++++++++---------- 6 files changed, 55 insertions(+), 55 deletions(-) rename src/{safe_provider.rs => robust_provider.rs} (85%) diff --git a/src/block_range_scanner.rs b/src/block_range_scanner.rs index 1fe78f56..a690a4a2 100644 --- a/src/block_range_scanner.rs +++ b/src/block_range_scanner.rs @@ -72,8 +72,8 @@ use tokio_stream::{StreamExt, wrappers::ReceiverStream}; use crate::{ error::ScannerError, - safe_provider::{ - DEFAULT_MAX_RETRIES, DEFAULT_MAX_TIMEOUT, DEFAULT_RETRY_INTERVAL, SafeProvider, + robust_provider::{ + DEFAULT_MAX_RETRIES, DEFAULT_MAX_TIMEOUT, DEFAULT_RETRY_INTERVAL, RobustProvider, }, types::{ScannerMessage, ScannerStatus}, }; @@ -198,26 +198,26 @@ impl BlockRangeScanner { /// Returns an error if the connection fails #[must_use] pub fn connect(self, provider: RootProvider) -> ConnectedBlockRangeScanner { - let safe_provider = SafeProvider::new(provider) + let robust_provider = RobustProvider::new(provider) .max_timeout(self.max_timeout) .max_retries(self.max_retries) .retry_interval(self.retry_interval); ConnectedBlockRangeScanner { - provider: safe_provider, + provider: robust_provider, max_block_range: self.max_block_range, } } } pub struct ConnectedBlockRangeScanner { - provider: SafeProvider, + provider: RobustProvider, max_block_range: u64, } impl ConnectedBlockRangeScanner { - /// Returns the `SafeProvider` + /// Returns the `RobustProvider` #[must_use] - pub fn provider(&self) -> &SafeProvider { + pub fn provider(&self) -> &RobustProvider { &self.provider } @@ -269,7 +269,7 @@ pub enum Command { } struct Service { - provider: SafeProvider, + provider: RobustProvider, max_block_range: u64, subscriber: Option>, websocket_connected: bool, @@ -280,7 +280,7 @@ struct Service { } impl Service { - pub fn new(provider: SafeProvider, max_block_range: u64) -> (Self, mpsc::Sender) { + pub fn new(provider: RobustProvider, max_block_range: u64) -> (Self, mpsc::Sender) { let (cmd_tx, cmd_rx) = mpsc::channel(100); let service = Self { @@ -678,7 +678,7 @@ impl Service { async fn stream_live_blocks( mut range_start: BlockNumber, - provider: SafeProvider, + provider: RobustProvider, sender: mpsc::Sender, block_confirmations: u64, max_block_range: u64, @@ -783,7 +783,7 @@ impl Service { } async fn get_block_subscription( - provider: &SafeProvider, + provider: &RobustProvider, ) -> Result, ScannerError> { let ws_stream = provider .subscribe_blocks() @@ -1018,9 +1018,9 @@ mod tests { use tokio::sync::mpsc; use tokio_stream::StreamExt; - fn mocked_provider(asserter: Asserter) -> SafeProvider { + fn mocked_provider(asserter: Asserter) -> RobustProvider { let root_provider = RootProvider::new(RpcClient::mocked(asserter)); - SafeProvider::new(root_provider) + RobustProvider::new(root_provider) } #[test] diff --git a/src/error.rs b/src/error.rs index 7eb6b887..4ea30a15 100644 --- a/src/error.rs +++ b/src/error.rs @@ -7,7 +7,7 @@ use alloy::{ }; use thiserror::Error; -use crate::{block_range_scanner::Message, safe_provider::SafeProviderError}; +use crate::{block_range_scanner::Message, 
robust_provider::RobustProviderError}; #[derive(Error, Debug, Clone)] pub enum ScannerError { @@ -50,12 +50,12 @@ pub enum ScannerError { RetryFail(usize), } -impl From for ScannerError { - fn from(error: SafeProviderError) -> ScannerError { +impl From for ScannerError { + fn from(error: RobustProviderError) -> ScannerError { match error { - SafeProviderError::RpcError(err) => ScannerError::RpcError(err), - SafeProviderError::Timeout => ScannerError::Timeout, - SafeProviderError::RetryFail(num) => ScannerError::RetryFail(num), + RobustProviderError::RpcError(err) => ScannerError::RpcError(err), + RobustProviderError::Timeout => ScannerError::Timeout, + RobustProviderError::RetryFail(num) => ScannerError::RetryFail(num), } } } diff --git a/src/event_scanner/message.rs b/src/event_scanner/message.rs index 1a2480f0..33da6c61 100644 --- a/src/event_scanner/message.rs +++ b/src/event_scanner/message.rs @@ -1,6 +1,6 @@ use alloy::{rpc::types::Log, sol_types::SolEvent}; -use crate::{ScannerError, ScannerMessage, safe_provider::SafeProviderError}; +use crate::{ScannerError, ScannerMessage, robust_provider::RobustProviderError}; pub type Message = ScannerMessage, ScannerError>; @@ -10,8 +10,8 @@ impl From> for Message { } } -impl From for Message { - fn from(error: SafeProviderError) -> Message { +impl From for Message { + fn from(error: RobustProviderError) -> Message { let scanner_error: ScannerError = error.into(); scanner_error.into() } diff --git a/src/event_scanner/modes/common.rs b/src/event_scanner/modes/common.rs index 62cc0ca8..59748dce 100644 --- a/src/event_scanner/modes/common.rs +++ b/src/event_scanner/modes/common.rs @@ -3,7 +3,7 @@ use std::ops::RangeInclusive; use crate::{ block_range_scanner::{MAX_BUFFERED_MESSAGES, Message as BlockRangeMessage}, event_scanner::{filter::EventFilter, listener::EventListener, message::Message}, - safe_provider::{SafeProvider, SafeProviderError}, + robust_provider::{RobustProvider, RobustProviderError}, }; use alloy::{ network::Network, @@ -24,7 +24,7 @@ pub enum ConsumerMode { pub async fn handle_stream( mut stream: ReceiverStream, - provider: &SafeProvider, + provider: &RobustProvider, listeners: &[EventListener], mode: ConsumerMode, ) { @@ -41,7 +41,7 @@ pub async fn handle_stream( } pub fn spawn_log_consumers( - provider: &SafeProvider, + provider: &RobustProvider, listeners: &[EventListener], range_tx: &Sender, mode: ConsumerMode, @@ -128,8 +128,8 @@ async fn get_logs( range: RangeInclusive, event_filter: &EventFilter, log_filter: &Filter, - provider: &SafeProvider, -) -> Result, SafeProviderError> { + provider: &RobustProvider, +) -> Result, RobustProviderError> { let log_filter = log_filter.clone().from_block(*range.start()).to_block(*range.end()); match provider.get_logs(&log_filter).await { diff --git a/src/lib.rs b/src/lib.rs index 28165f66..69d93152 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -1,7 +1,7 @@ pub mod block_range_scanner; pub mod error; pub mod event_scanner; -mod safe_provider; +mod robust_provider; #[cfg(any(test, feature = "test-utils"))] pub mod test_utils; pub mod types; diff --git a/src/safe_provider.rs b/src/robust_provider.rs similarity index 85% rename from src/safe_provider.rs rename to src/robust_provider.rs index 91547e85..1b8aab78 100644 --- a/src/safe_provider.rs +++ b/src/robust_provider.rs @@ -13,7 +13,7 @@ use thiserror::Error; use tracing::{error, info}; #[derive(Error, Debug, Clone)] -pub enum SafeProviderError { +pub enum RobustProviderError { #[error("RPC error: {0}")] RpcError(Arc>), #[error("Operation 
timed out")] @@ -22,9 +22,9 @@ pub enum SafeProviderError { RetryFail(usize), } -impl From> for SafeProviderError { +impl From> for RobustProviderError { fn from(err: RpcError) -> Self { - SafeProviderError::RpcError(Arc::new(err)) + RobustProviderError::RpcError(Arc::new(err)) } } @@ -33,7 +33,7 @@ impl From> for SafeProviderError { /// This wrapper around Alloy providers automatically handles retries, /// timeouts, and error logging for RPC calls. #[derive(Clone)] -pub struct SafeProvider { +pub struct RobustProvider { provider: RootProvider, max_timeout: Duration, max_retries: usize, @@ -41,15 +41,15 @@ pub struct SafeProvider { } // RPC retry and timeout settings -/// Default timeout used by `SafeProvider` +/// Default timeout used by `RobustProvider` pub const DEFAULT_MAX_TIMEOUT: Duration = Duration::from_secs(30); /// Default maximum number of retry attempts. pub const DEFAULT_MAX_RETRIES: usize = 5; /// Default base delay between retries. pub const DEFAULT_RETRY_INTERVAL: Duration = Duration::from_secs(1); -impl SafeProvider { - /// Create a new `SafeProvider` with default settings. +impl RobustProvider { + /// Create a new `RobustProvider` with default settings. #[must_use] pub fn new(provider: RootProvider) -> Self { Self { @@ -87,10 +87,10 @@ impl SafeProvider { pub async fn get_block_by_number( &self, number: BlockNumberOrTag, - ) -> Result, SafeProviderError> { + ) -> Result, RobustProviderError> { info!("eth_getBlockByNumber called"); let operation = async || { - self.provider.get_block_by_number(number).await.map_err(SafeProviderError::from) + self.provider.get_block_by_number(number).await.map_err(RobustProviderError::from) }; let result = self.retry_with_total_timeout(operation).await; if let Err(e) = &result { @@ -105,10 +105,10 @@ impl SafeProvider { /// /// Returns an error if RPC call fails repeatedly even /// after exhausting retries or if the call times out. - pub async fn get_block_number(&self) -> Result { + pub async fn get_block_number(&self) -> Result { info!("eth_getBlockNumber called"); let operation = - async || self.provider.get_block_number().await.map_err(SafeProviderError::from); + async || self.provider.get_block_number().await.map_err(RobustProviderError::from); let result = self.retry_with_total_timeout(operation).await; if let Err(e) = &result { error!(error = %e, "eth_getBlockNumber failed"); @@ -125,10 +125,10 @@ impl SafeProvider { pub async fn get_block_by_hash( &self, hash: alloy::primitives::BlockHash, - ) -> Result, SafeProviderError> { + ) -> Result, RobustProviderError> { info!("eth_getBlockByHash called"); let operation = - async || self.provider.get_block_by_hash(hash).await.map_err(SafeProviderError::from); + async || self.provider.get_block_by_hash(hash).await.map_err(RobustProviderError::from); let result = self.retry_with_total_timeout(operation).await; if let Err(e) = &result { error!(error = %e, "eth_getBlockByHash failed"); @@ -142,10 +142,10 @@ impl SafeProvider { /// /// Returns an error if RPC call fails repeatedly even /// after exhausting retries or if the call times out. 
- pub async fn get_logs(&self, filter: &Filter) -> Result, SafeProviderError> { + pub async fn get_logs(&self, filter: &Filter) -> Result, RobustProviderError> { info!("eth_getLogs called"); let operation = - async || self.provider.get_logs(filter).await.map_err(SafeProviderError::from); + async || self.provider.get_logs(filter).await.map_err(RobustProviderError::from); let result = self.retry_with_total_timeout(operation).await; if let Err(e) = &result { error!(error = %e, "eth_getLogs failed"); @@ -161,12 +161,12 @@ impl SafeProvider { /// after exhausting retries or if the call times out. pub async fn subscribe_blocks( &self, - ) -> Result, SafeProviderError> { + ) -> Result, RobustProviderError> { info!("eth_subscribe called"); let provider = self.provider.clone(); let result = self .retry_with_total_timeout(|| async { - provider.subscribe_blocks().await.map_err(SafeProviderError::from) + provider.subscribe_blocks().await.map_err(RobustProviderError::from) }) .await; if let Err(e) = &result { @@ -189,10 +189,10 @@ impl SafeProvider { async fn retry_with_total_timeout( &self, operation: F, - ) -> Result + ) -> Result where F: Fn() -> Fut, - Fut: Future>, + Fut: Future>, { let retry_strategy = ExponentialBuilder::default() .with_max_times(self.max_retries) @@ -205,8 +205,8 @@ impl SafeProvider { .await { Ok(Ok(res)) => Ok(res), - Ok(Err(_)) => Err(SafeProviderError::RetryFail(self.max_retries + 1)), - Err(_) => Err(SafeProviderError::Timeout), + Ok(Err(_)) => Err(RobustProviderError::RetryFail(self.max_retries + 1)), + Err(_) => Err(RobustProviderError::Timeout), } } } @@ -222,8 +222,8 @@ mod tests { timeout: u64, max_retries: usize, retry_interval: u64, - ) -> SafeProvider { - SafeProvider { + ) -> RobustProvider { + RobustProvider { provider: RootProvider::new_http("http://localhost:8545".parse().unwrap()), max_timeout: Duration::from_millis(timeout), max_retries, @@ -257,7 +257,7 @@ mod tests { .retry_with_total_timeout(|| async { call_count.fetch_add(1, Ordering::SeqCst); if call_count.load(Ordering::SeqCst) < 3 { - Err(SafeProviderError::RpcError(Arc::new(TransportErrorKind::custom_str( + Err(RobustProviderError::RpcError(Arc::new(TransportErrorKind::custom_str( "temp error", )))) } else { @@ -279,12 +279,12 @@ mod tests { .retry_with_total_timeout(|| async { call_count.fetch_add(1, Ordering::SeqCst); // permanent error - Err::(SafeProviderError::Timeout) + Err::(RobustProviderError::Timeout) }) .await; let err = result.unwrap_err(); - assert!(matches!(err, SafeProviderError::RetryFail(3))); + assert!(matches!(err, RobustProviderError::RetryFail(3))); assert_eq!(call_count.load(Ordering::SeqCst), 3); } @@ -301,6 +301,6 @@ mod tests { .await; let err = result.unwrap_err(); - assert!(matches!(err, SafeProviderError::Timeout)); + assert!(matches!(err, RobustProviderError::Timeout)); } } From f2b6d47f238ce9e5a34716cc9f5e2876b80d27f4 Mon Sep 17 00:00:00 2001 From: Leo Date: Tue, 28 Oct 2025 17:13:13 +0900 Subject: [PATCH 045/122] ref: comment --- src/robust_provider.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/robust_provider.rs b/src/robust_provider.rs index 1b8aab78..ef3890df 100644 --- a/src/robust_provider.rs +++ b/src/robust_provider.rs @@ -28,7 +28,7 @@ impl From> for RobustProviderError { } } -/// Safe provider wrapper with built-in retry and timeout mechanisms. +/// Provider wrapper with built-in retry and timeout mechanisms. /// /// This wrapper around Alloy providers automatically handles retries, /// timeouts, and error logging for RPC calls. 
From 9bae33b9278c68f2ed69e94bdf13404d5ca690e2 Mon Sep 17 00:00:00 2001 From: Leo Date: Tue, 28 Oct 2025 17:16:50 +0900 Subject: [PATCH 046/122] fix: rename --- src/block_range_scanner.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/block_range_scanner.rs b/src/block_range_scanner.rs index e5655ae8..c3be328f 100644 --- a/src/block_range_scanner.rs +++ b/src/block_range_scanner.rs @@ -212,7 +212,7 @@ impl BlockRangeScanner { #[must_use] pub fn connect(self, provider: RootProvider) -> ConnectedBlockRangeScanner { provider.client().expect_pubsub_frontend(); - let safe_provider = RobustProvider::new(provider) + let robust_provider = RobustProvider::new(provider) .max_timeout(self.max_timeout) .max_retries(self.max_retries) .retry_interval(self.retry_interval); From f622b21385b41817f97e8f7e9c031c1bb17b1fad Mon Sep 17 00:00:00 2001 From: Leo Date: Tue, 28 Oct 2025 18:42:43 +0900 Subject: [PATCH 047/122] feat: move block not found to provider --- src/block_range_scanner.rs | 45 ++++++++++---------------------------- src/error.rs | 15 ++----------- src/robust_provider.rs | 10 +++++++-- 3 files changed, 22 insertions(+), 48 deletions(-) diff --git a/src/block_range_scanner.rs b/src/block_range_scanner.rs index a690a4a2..23642b5f 100644 --- a/src/block_range_scanner.rs +++ b/src/block_range_scanner.rs @@ -65,8 +65,8 @@ use std::{cmp::Ordering, ops::RangeInclusive, time::Duration}; use tokio::{ - join, sync::{mpsc, oneshot}, + try_join, }; use tokio_stream::{StreamExt, wrappers::ReceiverStream}; @@ -398,10 +398,8 @@ impl Service { self.provider.get_block_by_number(end_height) )?; - let start_block_num = - start_block.ok_or_else(|| ScannerError::BlockNotFound(start_height))?.header().number(); - let end_block_num = - end_block.ok_or_else(|| ScannerError::BlockNotFound(end_height))?.header().number(); + let start_block_num = start_block.header().number(); + let end_block_num = end_block.header().number(); let (start_block_num, end_block_num) = match start_block_num.cmp(&end_block_num) { Ordering::Greater => (end_block_num, start_block_num), @@ -435,12 +433,8 @@ impl Service { self.provider.get_block_by_number(BlockNumberOrTag::Latest) )?; - let start_block_num = - start_block.ok_or_else(|| ScannerError::BlockNotFound(start_height))?.header().number(); - let latest_block = latest_block - .ok_or_else(|| ScannerError::BlockNotFound(BlockNumberOrTag::Latest))? - .header() - .number(); + let start_block_num = start_block.header().number(); + let latest_block = latest_block.header().number(); let confirmed_tip_num = latest_block.saturating_sub(block_confirmations); @@ -536,13 +530,10 @@ impl Service { start_height: BlockNumberOrTag, end_height: BlockNumberOrTag, ) -> Result<(), ScannerError> { - let (start_block, end_block) = join!( + let (start_block, end_block) = try_join!( self.provider.get_block_by_number(start_height), self.provider.get_block_by_number(end_height), - ); - - let start_block = start_block?.ok_or(ScannerError::BlockNotFound(start_height))?; - let end_block = end_block?.ok_or(ScannerError::BlockNotFound(end_height))?; + )?; // normalize block range let (from, to) = match start_block.header().number().cmp(&end_block.header().number()) { @@ -606,13 +597,7 @@ impl Service { // restart rewind batch_from = from; // store the updated end block hash - tip_hash = self - .provider - .get_block_by_number(from.into()) - .await? 
- .expect("Chain should have the same height post-reorg") - .header() - .hash(); + tip_hash = self.provider.get_block_by_number(from.into()).await?.header().hash(); } else { // SAFETY: `batch_to` is always greater than `to`, so `batch_to - 1` is always // a valid unsigned integer @@ -785,11 +770,7 @@ impl Service { async fn get_block_subscription( provider: &RobustProvider, ) -> Result, ScannerError> { - let ws_stream = provider - .subscribe_blocks() - .await - .map_err(|_| ScannerError::WebSocketConnectionFailed(1))?; - + let ws_stream = provider.subscribe_blocks().await?; Ok(ws_stream) } @@ -1656,13 +1637,11 @@ mod tests { let (tx, mut rx) = mpsc::channel(1); service.subscriber = Some(tx); - service - .send_to_subscriber(Message::Error(ScannerError::WebSocketConnectionFailed(4))) - .await; + service.send_to_subscriber(Message::Error(ScannerError::BlockNotFound(4.into()))).await; match rx.recv().await.expect("subscriber should stay open") { - Message::Error(ScannerError::WebSocketConnectionFailed(attempts)) => { - assert_eq!(attempts, 4); + Message::Error(ScannerError::BlockNotFound(attempts)) => { + assert_eq!(attempts, 4.into()); } other => panic!("unexpected message: {other:?}"), } diff --git a/src/error.rs b/src/error.rs index 4ea30a15..d168548a 100644 --- a/src/error.rs +++ b/src/error.rs @@ -3,7 +3,7 @@ use std::{ops::RangeInclusive, sync::Arc}; use alloy::{ eips::BlockNumberOrTag, primitives::BlockNumber, - transports::{RpcError, TransportErrorKind, http::reqwest}, + transports::{RpcError, TransportErrorKind}, }; use thiserror::Error; @@ -11,9 +11,6 @@ use crate::{block_range_scanner::Message, robust_provider::RobustProviderError}; #[derive(Error, Debug, Clone)] pub enum ScannerError { - #[error("HTTP request failed: {0}")] - HttpError(Arc), - // #[error("WebSocket error: {0}")] // WebSocketError(#[from] tokio_tungstenite::tungstenite::Error), #[error("Serialization error: {0}")] @@ -37,9 +34,6 @@ pub enum ScannerError { #[error("Historical sync failed: {0}")] HistoricalSyncError(String), - #[error("WebSocket connection failed after {0} attempts")] - WebSocketConnectionFailed(usize), - #[error("Block not found, block number: {0}")] BlockNotFound(BlockNumberOrTag), @@ -56,6 +50,7 @@ impl From for ScannerError { RobustProviderError::RpcError(err) => ScannerError::RpcError(err), RobustProviderError::Timeout => ScannerError::Timeout, RobustProviderError::RetryFail(num) => ScannerError::RetryFail(num), + RobustProviderError::BlockNotFound(block) => ScannerError::BlockNotFound(block), } } } @@ -69,12 +64,6 @@ impl From, ScannerError>> for Message { } } -impl From for ScannerError { - fn from(error: reqwest::Error) -> Self { - ScannerError::HttpError(Arc::new(error)) - } -} - impl From for ScannerError { fn from(error: serde_json::Error) -> Self { ScannerError::SerializationError(Arc::new(error)) diff --git a/src/robust_provider.rs b/src/robust_provider.rs index ef3890df..ab66d4a7 100644 --- a/src/robust_provider.rs +++ b/src/robust_provider.rs @@ -20,6 +20,8 @@ pub enum RobustProviderError { Timeout, #[error("Retry failed after {0} tries")] RetryFail(usize), + #[error("Block not found, block number: {0}")] + BlockNotFound(BlockNumberOrTag), } impl From> for RobustProviderError { @@ -87,7 +89,7 @@ impl RobustProvider { pub async fn get_block_by_number( &self, number: BlockNumberOrTag, - ) -> Result, RobustProviderError> { + ) -> Result { info!("eth_getBlockByNumber called"); let operation = async || { self.provider.get_block_by_number(number).await.map_err(RobustProviderError::from) @@ 
-96,7 +98,11 @@ impl RobustProvider { if let Err(e) = &result { error!(error = %e, "eth_getByBlockNumber failed"); } - result + + match result? { + Some(block) => Ok(block), + None => Err(RobustProviderError::BlockNotFound(number)), + } } /// Fetch the latest block number with retry and timeout. From 058a3edcb5934b1cb2a04167a90ba25229272e8e Mon Sep 17 00:00:00 2001 From: Leo Date: Tue, 28 Oct 2025 18:58:06 +0900 Subject: [PATCH 048/122] fix: doc test --- src/block_range_scanner.rs | 5 ----- 1 file changed, 5 deletions(-) diff --git a/src/block_range_scanner.rs b/src/block_range_scanner.rs index 23642b5f..517880cc 100644 --- a/src/block_range_scanner.rs +++ b/src/block_range_scanner.rs @@ -41,11 +41,6 @@ //! error!("Received error from subscription: {e}"); //! match e { //! ScannerError::ServiceShutdown => break, -//! ScannerError::WebSocketConnectionFailed(_) => { -//! error!( -//! "WebSocket connection failed, continuing to listen for reconnection" -//! ); -//! } //! _ => { //! error!("Non-fatal error, continuing: {e}"); //! } From 14ed0e4dda0f7a9c378968255f661848e437f4aa Mon Sep 17 00:00:00 2001 From: Leo Date: Tue, 28 Oct 2025 19:05:34 +0900 Subject: [PATCH 049/122] ref: use matches --- src/block_range_scanner.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/block_range_scanner.rs b/src/block_range_scanner.rs index 517880cc..db0d7f59 100644 --- a/src/block_range_scanner.rs +++ b/src/block_range_scanner.rs @@ -1635,8 +1635,8 @@ mod tests { service.send_to_subscriber(Message::Error(ScannerError::BlockNotFound(4.into()))).await; match rx.recv().await.expect("subscriber should stay open") { - Message::Error(ScannerError::BlockNotFound(attempts)) => { - assert_eq!(attempts, 4.into()); + Message::Error(err) => { + assert!(matches!(err, ScannerError::BlockNotFound(BlockNumberOrTag::Number(4)))); } other => panic!("unexpected message: {other:?}"), } From a531b7dd25af51bbe4dd56e9ec07122d7b98358d Mon Sep 17 00:00:00 2001 From: Leo Date: Tue, 28 Oct 2025 19:13:30 +0900 Subject: [PATCH 050/122] ref: refactor ok or else --- src/robust_provider.rs | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/src/robust_provider.rs b/src/robust_provider.rs index ab66d4a7..2cf05eec 100644 --- a/src/robust_provider.rs +++ b/src/robust_provider.rs @@ -99,10 +99,7 @@ impl RobustProvider { error!(error = %e, "eth_getByBlockNumber failed"); } - match result? { - Some(block) => Ok(block), - None => Err(RobustProviderError::BlockNotFound(number)), - } + result?.ok_or_else(|| RobustProviderError::BlockNotFound(number)) } /// Fetch the latest block number with retry and timeout. 
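Patch 047 moved the "block not found" case into the provider and patch 050 compresses it into a single combinator call: the first `?` surfaces transport-level failures, and `ok_or_else` turns a legitimate-but-empty response into a typed `BlockNotFound` error. A minimal, dependency-free sketch of that shape (all names here are illustrative stand-ins, not the crate's API):

```rust
// Sketch of the `Option` -> typed-error pattern from patches 047/050.
#[derive(Debug, PartialEq)]
#[allow(dead_code)]
enum Error {
    Transport(String),
    BlockNotFound(u64),
}

// Stand-in for an RPC call: a transport error and "no such block" are
// two different kinds of failure, so they travel in different layers.
fn rpc_get_block(number: u64) -> Result<Option<String>, Error> {
    if number <= 10 { Ok(Some(format!("block {number}"))) } else { Ok(None) }
}

fn get_block(number: u64) -> Result<String, Error> {
    // First `?` propagates transport errors; `ok_or_else` then maps the
    // empty `None` response to a typed error.
    rpc_get_block(number)?.ok_or_else(|| Error::BlockNotFound(number))
}

fn main() {
    assert_eq!(get_block(1).unwrap(), "block 1");
    assert_eq!(get_block(99), Err(Error::BlockNotFound(99)));
}
```

Keeping "no block" as an error variant rather than an `Option` spares every caller a second layer of unwrapping, which is what lets the scanner code in patch 047 drop its scattered `ok_or_else` calls.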
From 435dee3fcef737a20a037185b55922a1fea5e716 Mon Sep 17 00:00:00 2001 From: Nenad Date: Tue, 28 Oct 2025 11:14:20 +0100 Subject: [PATCH 051/122] fix: Retry updates (#141) Co-authored-by: Leo --- src/error.rs | 9 ++-- src/event_scanner/message.rs | 2 +- src/event_scanner/modes/common.rs | 2 +- src/robust_provider.rs | 83 ++++++++++++------------------- 4 files changed, 37 insertions(+), 59 deletions(-) diff --git a/src/error.rs b/src/error.rs index d168548a..cd4b4926 100644 --- a/src/error.rs +++ b/src/error.rs @@ -7,7 +7,7 @@ use alloy::{ }; use thiserror::Error; -use crate::{block_range_scanner::Message, robust_provider::RobustProviderError}; +use crate::{block_range_scanner::Message, robust_provider::Error as RobustProviderError}; #[derive(Error, Debug, Clone)] pub enum ScannerError { @@ -40,16 +40,15 @@ pub enum ScannerError { #[error("Operation timed out")] Timeout, - #[error("Retry failed after {0} tries")] - RetryFail(usize), + #[error("RPC call failed after exhausting all retry attempts: {0}")] + RetryFailure(Arc>), } impl From for ScannerError { fn from(error: RobustProviderError) -> ScannerError { match error { - RobustProviderError::RpcError(err) => ScannerError::RpcError(err), RobustProviderError::Timeout => ScannerError::Timeout, - RobustProviderError::RetryFail(num) => ScannerError::RetryFail(num), + RobustProviderError::RetryFailure(err) => ScannerError::RetryFailure(err), RobustProviderError::BlockNotFound(block) => ScannerError::BlockNotFound(block), } } diff --git a/src/event_scanner/message.rs b/src/event_scanner/message.rs index 33da6c61..ebd1081a 100644 --- a/src/event_scanner/message.rs +++ b/src/event_scanner/message.rs @@ -1,6 +1,6 @@ use alloy::{rpc::types::Log, sol_types::SolEvent}; -use crate::{ScannerError, ScannerMessage, robust_provider::RobustProviderError}; +use crate::{ScannerError, ScannerMessage, robust_provider::Error as RobustProviderError}; pub type Message = ScannerMessage, ScannerError>; diff --git a/src/event_scanner/modes/common.rs b/src/event_scanner/modes/common.rs index 59748dce..ef93e17e 100644 --- a/src/event_scanner/modes/common.rs +++ b/src/event_scanner/modes/common.rs @@ -3,7 +3,7 @@ use std::ops::RangeInclusive; use crate::{ block_range_scanner::{MAX_BUFFERED_MESSAGES, Message as BlockRangeMessage}, event_scanner::{filter::EventFilter, listener::EventListener, message::Message}, - robust_provider::{RobustProvider, RobustProviderError}, + robust_provider::{Error as RobustProviderError, RobustProvider}, }; use alloy::{ network::Network, diff --git a/src/robust_provider.rs b/src/robust_provider.rs index 2cf05eec..872c7ba6 100644 --- a/src/robust_provider.rs +++ b/src/robust_provider.rs @@ -13,20 +13,18 @@ use thiserror::Error; use tracing::{error, info}; #[derive(Error, Debug, Clone)] -pub enum RobustProviderError { - #[error("RPC error: {0}")] - RpcError(Arc>), +pub enum Error { #[error("Operation timed out")] Timeout, - #[error("Retry failed after {0} tries")] - RetryFail(usize), + #[error("RPC call failed after exhausting all retry attempts: {0}")] + RetryFailure(Arc>), #[error("Block not found, block number: {0}")] BlockNotFound(BlockNumberOrTag), } -impl From> for RobustProviderError { +impl From> for Error { fn from(err: RpcError) -> Self { - RobustProviderError::RpcError(Arc::new(err)) + Error::RetryFailure(Arc::new(err)) } } @@ -89,11 +87,9 @@ impl RobustProvider { pub async fn get_block_by_number( &self, number: BlockNumberOrTag, - ) -> Result { + ) -> Result { info!("eth_getBlockByNumber called"); - let operation = async || { - 
self.provider.get_block_by_number(number).await.map_err(RobustProviderError::from) - }; + let operation = async || self.provider.get_block_by_number(number).await; let result = self.retry_with_total_timeout(operation).await; if let Err(e) = &result { error!(error = %e, "eth_getByBlockNumber failed"); @@ -108,10 +104,9 @@ impl RobustProvider { /// /// Returns an error if RPC call fails repeatedly even /// after exhausting retries or if the call times out. - pub async fn get_block_number(&self) -> Result { + pub async fn get_block_number(&self) -> Result { info!("eth_getBlockNumber called"); - let operation = - async || self.provider.get_block_number().await.map_err(RobustProviderError::from); + let operation = async || self.provider.get_block_number().await; let result = self.retry_with_total_timeout(operation).await; if let Err(e) = &result { error!(error = %e, "eth_getBlockNumber failed"); @@ -128,10 +123,9 @@ impl RobustProvider { pub async fn get_block_by_hash( &self, hash: alloy::primitives::BlockHash, - ) -> Result, RobustProviderError> { + ) -> Result, Error> { info!("eth_getBlockByHash called"); - let operation = - async || self.provider.get_block_by_hash(hash).await.map_err(RobustProviderError::from); + let operation = async || self.provider.get_block_by_hash(hash).await; let result = self.retry_with_total_timeout(operation).await; if let Err(e) = &result { error!(error = %e, "eth_getBlockByHash failed"); @@ -145,10 +139,9 @@ impl RobustProvider { /// /// Returns an error if RPC call fails repeatedly even /// after exhausting retries or if the call times out. - pub async fn get_logs(&self, filter: &Filter) -> Result, RobustProviderError> { + pub async fn get_logs(&self, filter: &Filter) -> Result, Error> { info!("eth_getLogs called"); - let operation = - async || self.provider.get_logs(filter).await.map_err(RobustProviderError::from); + let operation = async || self.provider.get_logs(filter).await; let result = self.retry_with_total_timeout(operation).await; if let Err(e) = &result { error!(error = %e, "eth_getLogs failed"); @@ -162,16 +155,10 @@ impl RobustProvider { /// /// Returns an error if RPC call fails repeatedly even /// after exhausting retries or if the call times out. - pub async fn subscribe_blocks( - &self, - ) -> Result, RobustProviderError> { + pub async fn subscribe_blocks(&self) -> Result, Error> { info!("eth_subscribe called"); - let provider = self.provider.clone(); - let result = self - .retry_with_total_timeout(|| async { - provider.subscribe_blocks().await.map_err(RobustProviderError::from) - }) - .await; + let operation = async || self.provider.subscribe_blocks().await; + let result = self.retry_with_total_timeout(operation).await; if let Err(e) = &result { error!(error = %e, "eth_subscribe failed"); } @@ -189,13 +176,10 @@ impl RobustProvider { /// - Returns [`RpcError`] with message "total operation timeout exceeded" /// if the overall timeout elapses. /// - Propagates any [`RpcError`] from the underlying retries. 
-    async fn retry_with_total_timeout<F, Fut, T>(
-        &self,
-        operation: F,
-    ) -> Result<T, RobustProviderError>
+    async fn retry_with_total_timeout<F, Fut, T>(&self, operation: F) -> Result<T, Error>
     where
         F: Fn() -> Fut,
-        Fut: Future<Output = Result<T, RobustProviderError>>,
+        Fut: Future<Output = Result<T, RpcError<TransportErrorKind>>>,
     {
         let retry_strategy = ExponentialBuilder::default()
             .with_max_times(self.max_retries)
             .with_min_delay(self.retry_interval);
@@ -207,9 +191,8 @@
         )
         .await
         {
-            Ok(Ok(res)) => Ok(res),
-            Ok(Err(_)) => Err(RobustProviderError::RetryFail(self.max_retries + 1)),
-            Err(_) => Err(RobustProviderError::Timeout),
+            Ok(res) => res.map_err(Error::from),
+            Err(_) => Err(Error::Timeout),
         }
     }
 }
@@ -243,7 +226,8 @@ mod tests {
         let result = provider
             .retry_with_total_timeout(|| async {
                 call_count.fetch_add(1, Ordering::SeqCst);
-                Ok(call_count.load(Ordering::SeqCst))
+                let count = call_count.load(Ordering::SeqCst);
+                Ok(count)
             })
             .await;
@@ -259,12 +243,10 @@
         let result = provider
             .retry_with_total_timeout(|| async {
                 call_count.fetch_add(1, Ordering::SeqCst);
-                if call_count.load(Ordering::SeqCst) < 3 {
-                    Err(RobustProviderError::RpcError(Arc::new(TransportErrorKind::custom_str(
-                        "temp error",
-                    ))))
-                } else {
-                    Ok(call_count.load(Ordering::SeqCst))
+                let count = call_count.load(Ordering::SeqCst);
+                match count {
+                    3 => Ok(count),
+                    _ => Err(TransportErrorKind::BackendGone.into()),
                 }
             })
             .await;
@@ -278,21 +260,19 @@
         let call_count = AtomicUsize::new(0);
 
-        let result = provider
+        let result: Result<(), Error> = provider
             .retry_with_total_timeout(|| async {
                 call_count.fetch_add(1, Ordering::SeqCst);
-                // permanent error
-                Err::<(), _>(RobustProviderError::Timeout)
+                Err(TransportErrorKind::BackendGone.into())
             })
             .await;
 
-        let err = result.unwrap_err();
-        assert!(matches!(err, RobustProviderError::RetryFail(3)));
+        assert!(matches!(result, Err(Error::RetryFailure(_))));
         assert_eq!(call_count.load(Ordering::SeqCst), 3);
     }
 
     #[tokio::test]
-    async fn test_retry_with_timeout_respects_total_delay() {
+    async fn test_retry_with_timeout_respects_max_timeout() {
         let max_timeout = 50;
 
         let provider = test_provider(max_timeout, 10, 1);
@@ -303,7 +283,6 @@
             })
             .await;
 
-        let err = result.unwrap_err();
-        assert!(matches!(err, RobustProviderError::Timeout));
+        assert!(matches!(result, Err(Error::Timeout)));
     }
 }

From 617629e6194655ccac6da54eff44e16735a8301a Mon Sep 17 00:00:00 2001
From: Leo
Date: Tue, 28 Oct 2025 19:15:58 +0900
Subject: [PATCH 052/122] fix: rename error

---
 src/robust_provider.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/robust_provider.rs b/src/robust_provider.rs
index 872c7ba6..3e90f4c4 100644
--- a/src/robust_provider.rs
+++ b/src/robust_provider.rs
@@ -95,7 +95,7 @@ impl<N: Network> RobustProvider<N> {
             error!(error = %e, "eth_getByBlockNumber failed");
         }
 
-        result?.ok_or_else(|| RobustProviderError::BlockNotFound(number))
+        result?.ok_or_else(|| Error::BlockNotFound(number))
     }
 
     /// Fetch the latest block number with retry and timeout.
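Patches 051-052 settle the retry wrapper's shape: the inner result carries the raw `RpcError`, and the wrapper alone decides between `RetryFailure` (attempts exhausted) and `Timeout` (total budget exceeded). A self-contained sketch of that two-layer pattern, using the same backon calls the series already relies on (`ExponentialBuilder`, `Retryable::retry`, `.sleep(tokio::time::sleep)`); the error type and signature here are simplified stand-ins, not the crate's:

```rust
use std::{future::Future, time::Duration};

use backon::{ExponentialBuilder, Retryable};
use tokio::time::timeout;

// Illustrative error type; the series uses `robust_provider::Error`
// with `Timeout` and `RetryFailure(Arc<RpcError<_>>)` variants instead.
#[derive(Debug)]
enum Error {
    Timeout,
    RetryFailure(String),
}

// Inner layer: backon retries individual attempts with exponential backoff.
// Outer layer: `tokio::time::timeout` bounds the *total* time spent across
// all attempts and backoff sleeps.
async fn retry_with_total_timeout<F, Fut, T>(
    operation: F,
    max_retries: usize,
    retry_interval: Duration,
    max_timeout: Duration,
) -> Result<T, Error>
where
    F: Fn() -> Fut,
    Fut: Future<Output = Result<T, String>>,
{
    let retry_strategy =
        ExponentialBuilder::default().with_max_times(max_retries).with_min_delay(retry_interval);

    match timeout(max_timeout, operation.retry(retry_strategy).sleep(tokio::time::sleep)).await {
        // Inner result: success, or the last error once retries ran out.
        Ok(res) => res.map_err(Error::RetryFailure),
        // Outer timeout: the whole retry loop took too long.
        Err(_) => Err(Error::Timeout),
    }
}

#[tokio::main]
async fn main() {
    let result: Result<(), Error> = retry_with_total_timeout(
        || async { Err("backend gone".to_string()) },
        2,
        Duration::from_millis(10),
        Duration::from_secs(1),
    )
    .await;
    assert!(matches!(result, Err(Error::RetryFailure(_))));
}
```

The key property is that the outer `timeout` caps total wall-clock time across every attempt and backoff sleep, so `max_retries` and `max_timeout` can be tuned independently, which is exactly what the renamed `test_retry_with_timeout_respects_max_timeout` test pins down.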
From f2b2281a88c032b2c79c4e092177c542e5ef96a7 Mon Sep 17 00:00:00 2001 From: Leo Date: Tue, 28 Oct 2025 20:49:12 +0900 Subject: [PATCH 053/122] feat: unwrap block in robust provider --- src/block_range_scanner.rs | 17 +++++++++-------- src/error.rs | 6 +++--- src/robust_provider.rs | 13 +++++++------ 3 files changed, 19 insertions(+), 17 deletions(-) diff --git a/src/block_range_scanner.rs b/src/block_range_scanner.rs index db0d7f59..3a8a1e53 100644 --- a/src/block_range_scanner.rs +++ b/src/block_range_scanner.rs @@ -606,12 +606,7 @@ impl Service { } async fn reorg_detected(&self, hash_to_check: B256) -> Result { - Ok(self - .provider - .get_block_by_hash(hash_to_check) - .await - .map_err(ScannerError::from)? - .is_none()) + Ok(self.provider.get_block_by_hash(hash_to_check).await.is_err()) } async fn stream_historical_blocks( @@ -978,7 +973,10 @@ impl BlockRangeScannerClient { #[cfg(test)] mod tests { - use alloy::providers::{Provider, RootProvider}; + use alloy::{ + eips::BlockId, + providers::{Provider, RootProvider}, + }; use std::time::Duration; use tokio::time::timeout; @@ -1636,7 +1634,10 @@ mod tests { match rx.recv().await.expect("subscriber should stay open") { Message::Error(err) => { - assert!(matches!(err, ScannerError::BlockNotFound(BlockNumberOrTag::Number(4)))); + assert!(matches!( + err, + ScannerError::BlockNotFound(BlockId::Number(BlockNumberOrTag::Number(4))) + )); } other => panic!("unexpected message: {other:?}"), } diff --git a/src/error.rs b/src/error.rs index cd4b4926..d6465a3f 100644 --- a/src/error.rs +++ b/src/error.rs @@ -1,7 +1,7 @@ use std::{ops::RangeInclusive, sync::Arc}; use alloy::{ - eips::BlockNumberOrTag, + eips::BlockId, primitives::BlockNumber, transports::{RpcError, TransportErrorKind}, }; @@ -34,8 +34,8 @@ pub enum ScannerError { #[error("Historical sync failed: {0}")] HistoricalSyncError(String), - #[error("Block not found, block number: {0}")] - BlockNotFound(BlockNumberOrTag), + #[error("Block not found, Block Id: {0}")] + BlockNotFound(BlockId), #[error("Operation timed out")] Timeout, diff --git a/src/robust_provider.rs b/src/robust_provider.rs index 3e90f4c4..f66158d4 100644 --- a/src/robust_provider.rs +++ b/src/robust_provider.rs @@ -1,7 +1,7 @@ use std::{future::Future, sync::Arc, time::Duration}; use alloy::{ - eips::BlockNumberOrTag, + eips::{BlockId, BlockNumberOrTag}, network::Network, providers::{Provider, RootProvider}, pubsub::Subscription, @@ -18,8 +18,8 @@ pub enum Error { Timeout, #[error("RPC call failed after exhausting all retry attempts: {0}")] RetryFailure(Arc>), - #[error("Block not found, block number: {0}")] - BlockNotFound(BlockNumberOrTag), + #[error("Block not found, Block Id: {0}")] + BlockNotFound(BlockId), } impl From> for Error { @@ -95,7 +95,7 @@ impl RobustProvider { error!(error = %e, "eth_getByBlockNumber failed"); } - result?.ok_or_else(|| Error::BlockNotFound(number)) + result?.ok_or_else(|| Error::BlockNotFound(number.into())) } /// Fetch the latest block number with retry and timeout. 
@@ -123,14 +123,15 @@ impl RobustProvider { pub async fn get_block_by_hash( &self, hash: alloy::primitives::BlockHash, - ) -> Result, Error> { + ) -> Result { info!("eth_getBlockByHash called"); let operation = async || self.provider.get_block_by_hash(hash).await; let result = self.retry_with_total_timeout(operation).await; if let Err(e) = &result { error!(error = %e, "eth_getBlockByHash failed"); } - result + + result?.ok_or_else(|| Error::BlockNotFound(hash.into())) } /// Fetch logs for the given filter with retry and timeout. From 34692bbea2265d42f2ab3cc6fa537fb7869bc3bc Mon Sep 17 00:00:00 2001 From: Leo Date: Tue, 28 Oct 2025 21:19:47 +0900 Subject: [PATCH 054/122] fix: merge errors --- src/robust_provider.rs | 35 +++++++++++++++++++++++++---------- 1 file changed, 25 insertions(+), 10 deletions(-) diff --git a/src/robust_provider.rs b/src/robust_provider.rs index 51333034..97fd2d61 100644 --- a/src/robust_provider.rs +++ b/src/robust_provider.rs @@ -101,8 +101,11 @@ impl RobustProvider { number: BlockNumberOrTag, ) -> Result { info!("eth_getBlockByNumber called"); - let operation = async || self.provider.get_block_by_number(number).await; - let result = self.retry_with_total_timeout(operation).await; + let result = self + .retry_with_total_timeout(move |provider| async move { + provider.get_block_by_number(number).await + }) + .await; if let Err(e) = &result { error!(error = %e, "eth_getByBlockNumber failed"); } @@ -118,8 +121,11 @@ impl RobustProvider { /// after exhausting retries or if the call times out. pub async fn get_block_number(&self) -> Result { info!("eth_getBlockNumber called"); - let operation = async || self.provider.get_block_number().await; - let result = self.retry_with_total_timeout(operation).await; + let result = self + .retry_with_total_timeout( + move |provider| async move { provider.get_block_number().await }, + ) + .await; if let Err(e) = &result { error!(error = %e, "eth_getBlockNumber failed"); } @@ -137,8 +143,11 @@ impl RobustProvider { hash: alloy::primitives::BlockHash, ) -> Result { info!("eth_getBlockByHash called"); - let operation = async || self.provider.get_block_by_hash(hash).await; - let result = self.retry_with_total_timeout(operation).await; + let result = self + .retry_with_total_timeout(move |provider| async move { + provider.get_block_by_hash(hash).await + }) + .await; if let Err(e) = &result { error!(error = %e, "eth_getBlockByHash failed"); } @@ -154,8 +163,11 @@ impl RobustProvider { /// after exhausting retries or if the call times out. pub async fn get_logs(&self, filter: &Filter) -> Result, Error> { info!("eth_getLogs called"); - let operation = async || self.provider.get_logs(filter).await; - let result = self.retry_with_total_timeout(operation).await; + let result = self + .retry_with_total_timeout( + move |provider| async move { provider.get_logs(filter).await }, + ) + .await; if let Err(e) = &result { error!(error = %e, "eth_getLogs failed"); } @@ -170,8 +182,11 @@ impl RobustProvider { /// after exhausting retries or if the call times out. 
pub async fn subscribe_blocks(&self) -> Result, Error> { info!("eth_subscribe called"); - let operation = async || self.provider.subscribe_blocks().await; - let result = self.retry_with_total_timeout(operation).await; + let result = self + .retry_with_total_timeout( + move |provider| async move { provider.subscribe_blocks().await }, + ) + .await; if let Err(e) = &result { error!(error = %e, "eth_subscribe failed"); } From 47b7aa55bffdceb56e50c3919c14a870dfac6db8 Mon Sep 17 00:00:00 2001 From: Leo Date: Tue, 28 Oct 2025 21:23:53 +0900 Subject: [PATCH 055/122] fix: test post merge --- src/robust_provider.rs | 20 +++++++------------- 1 file changed, 7 insertions(+), 13 deletions(-) diff --git a/src/robust_provider.rs b/src/robust_provider.rs index 97fd2d61..487fede3 100644 --- a/src/robust_provider.rs +++ b/src/robust_provider.rs @@ -282,10 +282,7 @@ impl RobustProvider { mod tests { use super::*; use alloy::network::Ethereum; - use std::sync::{ - Arc, - atomic::{AtomicUsize, Ordering}, - }; + use std::sync::atomic::{AtomicUsize, Ordering}; use tokio::time::sleep; fn test_provider( @@ -306,11 +303,10 @@ mod tests { async fn test_retry_with_timeout_succeeds_on_first_attempt() { let provider = test_provider(100, 3, 10); - let call_count = Arc::new(AtomicUsize::new(0)); - let call_count_clone = call_count.clone(); + let call_count = AtomicUsize::new(0); let result = provider - .retry_with_total_timeout(|| async { + .retry_with_total_timeout(|_| async { call_count.fetch_add(1, Ordering::SeqCst); let count = call_count.load(Ordering::SeqCst); Ok(count) @@ -324,11 +320,10 @@ mod tests { async fn test_retry_with_timeout_retries_on_error() { let provider = test_provider(100, 3, 10); - let call_count = Arc::new(AtomicUsize::new(0)); - let call_count_clone = call_count.clone(); + let call_count = AtomicUsize::new(0); let result = provider - .retry_with_total_timeout(|| async { + .retry_with_total_timeout(|_| async { call_count.fetch_add(1, Ordering::SeqCst); let count = call_count.load(Ordering::SeqCst); match count { @@ -345,11 +340,10 @@ mod tests { async fn test_retry_with_timeout_fails_after_max_retries() { let provider = test_provider(100, 2, 10); - let call_count = Arc::new(AtomicUsize::new(0)); - let call_count_clone = call_count.clone(); + let call_count = AtomicUsize::new(0); let result: Result<(), Error> = provider - .retry_with_total_timeout(|| async { + .retry_with_total_timeout(|_| async { call_count.fetch_add(1, Ordering::SeqCst); Err(TransportErrorKind::BackendGone.into()) }) From c9f140dcf92e16e55c2c2bc0dfc9aa17665894b8 Mon Sep 17 00:00:00 2001 From: Leo Date: Tue, 28 Oct 2025 22:00:57 +0900 Subject: [PATCH 056/122] fix: more merge errors --- src/block_range_scanner.rs | 55 +++++++++++++++++-------------- src/error.rs | 12 +++---- src/event_scanner/modes/common.rs | 1 + 3 files changed, 36 insertions(+), 32 deletions(-) diff --git a/src/block_range_scanner.rs b/src/block_range_scanner.rs index 95597066..f106458a 100644 --- a/src/block_range_scanner.rs +++ b/src/block_range_scanner.rs @@ -68,6 +68,10 @@ use tokio_stream::{StreamExt, wrappers::ReceiverStream}; use crate::{ ScannerMessage, error::ScannerError, + robust_provider::{ + DEFAULT_MAX_RETRIES, DEFAULT_MAX_TIMEOUT, DEFAULT_RETRY_INTERVAL, + Error as RobustProviderError, RobustProvider, + }, types::{ScannerStatus, TryStream}, }; use alloy::{ @@ -108,6 +112,12 @@ impl PartialEq> for Message { } } +impl From for Message { + fn from(error: RobustProviderError) -> Self { + Message::Error(error.into()) + } +} + impl From> for Message { 
fn from(error: RpcError) -> Self { Message::Error(error.into()) @@ -513,7 +523,7 @@ impl Service { let max_block_range = self.max_block_range; let provider = self.provider.clone(); - let (start_block, end_block) = join!( + let (start_block, end_block) = try_join!( self.provider.get_block_by_number(start_height), self.provider.get_block_by_number(end_height), )?; @@ -543,7 +553,7 @@ impl Service { to: N::BlockResponse, max_block_range: u64, sender: &mpsc::Sender, - provider: &RootProvider, + provider: &RobustProvider, ) { let mut batch_count = 0; @@ -595,13 +605,11 @@ impl Service { batch_from = from; // store the updated end block hash tip_hash = match provider.get_block_by_number(from.into()).await { - Ok(block) => block - .unwrap_or_else(|| { - panic!("Block with number '{from}' should exist post-reorg") - }) - .header() - .hash(), + Ok(block) => block.header().hash(), Err(e) => { + if matches!(e, RobustProviderError::BlockNotFound(_)) { + panic!("Block with number '{from}' should exist post-reorg"); + } error!(error = %e, "Terminal RPC call error, shutting down"); _ = sender.try_stream(e); return; @@ -762,10 +770,10 @@ impl Service { } async fn reorg_detected( - provider: &RootProvider, + provider: &RobustProvider, hash_to_check: B256, ) -> Result> { - Ok(provider.get_block_by_hash(hash_to_check).await?.is_none()) + Ok(provider.get_block_by_hash(hash_to_check).await.is_err()) } pub struct BlockRangeScannerClient { @@ -914,6 +922,7 @@ mod tests { use super::*; use crate::{assert_closed, assert_empty, assert_next}; use alloy::{ + eips::BlockId, network::Ethereum, providers::{ProviderBuilder, ext::AnvilApi}, rpc::types::anvil::ReorgOptions, @@ -1366,21 +1375,16 @@ mod tests { #[tokio::test] async fn try_send_forwards_errors_to_subscribers() { - let (tx, mut rx) = mpsc::channel(1); + let (tx, mut rx) = mpsc::channel::(1); - service.send_to_subscriber(Message::Error(ScannerError::BlockNotFound(4.into()))).await; + _ = tx.try_stream(ScannerError::BlockNotFound(4.into())).await; - match rx.recv().await.expect("subscriber should stay open") { - Message::Error(err) => { - assert!(matches!( - err, - ScannerError::BlockNotFound(BlockId::Number(BlockNumberOrTag::Number(4))) - )); - } - other => panic!("unexpected message: {other:?}"), - } - - Ok(()) + assert!(matches!( + rx.recv().await, + Some(ScannerMessage::Error(ScannerError::BlockNotFound(BlockId::Number( + BlockNumberOrTag::Number(4) + )))) + )); } #[tokio::test] @@ -1575,7 +1579,10 @@ mod tests { let stream = client.rewind(0, 999).await; - assert!(matches!(stream, Err(ScannerError::BlockNotFound(BlockNumberOrTag::Number(999))))); + assert!(matches!( + stream, + Err(ScannerError::BlockNotFound(BlockId::Number(BlockNumberOrTag::Number(999)))) + )); Ok(()) } diff --git a/src/error.rs b/src/error.rs index d64a524e..cc67dc13 100644 --- a/src/error.rs +++ b/src/error.rs @@ -1,11 +1,13 @@ use std::sync::Arc; use alloy::{ - eips::BlockNumberOrTag, - transports::{RpcError, TransportErrorKind, http::reqwest}, + eips::BlockId, + transports::{RpcError, TransportErrorKind}, }; use thiserror::Error; +use crate::robust_provider::Error as RobustProviderError; + #[derive(Error, Debug, Clone)] pub enum ScannerError { // #[error("WebSocket error: {0}")] @@ -48,12 +50,6 @@ impl From for ScannerError { } } -impl From for ScannerError { - fn from(error: reqwest::Error) -> Self { - ScannerError::HttpError(Arc::new(error)) - } -} - impl From for ScannerError { fn from(error: serde_json::Error) -> Self { ScannerError::SerializationError(Arc::new(error)) diff --git 
a/src/event_scanner/modes/common.rs b/src/event_scanner/modes/common.rs index 4757c54f..205c2f9f 100644 --- a/src/event_scanner/modes/common.rs +++ b/src/event_scanner/modes/common.rs @@ -3,6 +3,7 @@ use std::ops::RangeInclusive; use crate::{ block_range_scanner::{MAX_BUFFERED_MESSAGES, Message as BlockRangeMessage}, event_scanner::{filter::EventFilter, listener::EventListener}, + robust_provider::{Error as RobustProviderError, RobustProvider}, types::TryStream, }; use alloy::{ From 8eff737e23d1c45b2db4188b85728c923b16120e Mon Sep 17 00:00:00 2001 From: Leo Date: Tue, 28 Oct 2025 23:08:50 +0900 Subject: [PATCH 057/122] Update src/block_range_scanner.rs Co-authored-by: Nenad --- src/block_range_scanner.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/block_range_scanner.rs b/src/block_range_scanner.rs index f106458a..8ed72d04 100644 --- a/src/block_range_scanner.rs +++ b/src/block_range_scanner.rs @@ -606,10 +606,10 @@ impl Service { // store the updated end block hash tip_hash = match provider.get_block_by_number(from.into()).await { Ok(block) => block.header().hash(), + Err(RobustProviderError::BlockNotFound(_) => { + panic!("Block with number '{from}' should exist post-reorg"); + } Err(e) => { - if matches!(e, RobustProviderError::BlockNotFound(_)) { - panic!("Block with number '{from}' should exist post-reorg"); - } error!(error = %e, "Terminal RPC call error, shutting down"); _ = sender.try_stream(e); return; From b34dbbe1b9536df916f74978bff3fa1a0dac0b2a Mon Sep 17 00:00:00 2001 From: Leo Date: Tue, 28 Oct 2025 23:11:52 +0900 Subject: [PATCH 058/122] fix: brackets --- src/block_range_scanner.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/block_range_scanner.rs b/src/block_range_scanner.rs index 8ed72d04..81b1e987 100644 --- a/src/block_range_scanner.rs +++ b/src/block_range_scanner.rs @@ -606,7 +606,7 @@ impl Service { // store the updated end block hash tip_hash = match provider.get_block_by_number(from.into()).await { Ok(block) => block.header().hash(), - Err(RobustProviderError::BlockNotFound(_) => { + Err(RobustProviderError::BlockNotFound(_)) => { panic!("Block with number '{from}' should exist post-reorg"); } Err(e) => { From 82878f6d59d6e450d374b0bab43ecbdb5e982903 Mon Sep 17 00:00:00 2001 From: Leo Date: Tue, 28 Oct 2025 23:18:53 +0900 Subject: [PATCH 059/122] fix: only return true on reorg detected if err is block not found --- src/block_range_scanner.rs | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/src/block_range_scanner.rs b/src/block_range_scanner.rs index 81b1e987..44f25db1 100644 --- a/src/block_range_scanner.rs +++ b/src/block_range_scanner.rs @@ -772,8 +772,12 @@ impl Service { async fn reorg_detected( provider: &RobustProvider, hash_to_check: B256, -) -> Result> { - Ok(provider.get_block_by_hash(hash_to_check).await.is_err()) +) -> Result { + match provider.get_block_by_hash(hash_to_check).await { + Ok(_) => Ok(false), + Err(RobustProviderError::BlockNotFound(_)) => Ok(true), + Err(e) => Err(e.into()), + } } pub struct BlockRangeScannerClient { From 7b8b0821b9cebbca8a4faf37b20db73238ea374e Mon Sep 17 00:00:00 2001 From: Leo Date: Wed, 29 Oct 2025 17:35:16 +0900 Subject: [PATCH 060/122] ref: remove pubsub check --- src/block_range_scanner.rs | 10 ++-------- src/event_scanner/modes/historic.rs | 4 ---- src/event_scanner/modes/latest.rs | 4 ---- src/event_scanner/modes/live.rs | 1 + src/event_scanner/modes/sync.rs | 4 ---- 5 files changed, 3 insertions(+), 20 deletions(-) diff --git 
a/src/block_range_scanner.rs b/src/block_range_scanner.rs index 6e8b1d47..f712bfd8 100644 --- a/src/block_range_scanner.rs +++ b/src/block_range_scanner.rs @@ -79,7 +79,7 @@ use alloy::{ eips::BlockNumberOrTag, network::{BlockResponse, Network, primitives::HeaderResponse}, primitives::{B256, BlockNumber}, - providers::{Provider, RootProvider}, + providers::RootProvider, pubsub::Subscription, rpc::client::ClientBuilder, transports::{ @@ -182,13 +182,8 @@ impl BlockRangeScanner { } /// Adds a fallback provider to the block range scanner - /// - /// # Errors - /// - /// Will panic if the provider does not implement pubsub #[must_use] pub fn fallback_provider(mut self, provider: RootProvider) -> Self { - provider.client().expect_pubsub_frontend(); self.fallback_providers.push(provider); self } @@ -223,10 +218,9 @@ impl BlockRangeScanner { /// /// # Errors /// - /// Returns an error if the connection fails or provider does not support pubsub. + /// Returns an error if the connection fails #[must_use] pub fn connect(self, provider: RootProvider) -> ConnectedBlockRangeScanner { - provider.client().expect_pubsub_frontend(); let robust_provider = RobustProvider::new(provider) .max_timeout(self.max_timeout) .max_retries(self.max_retries) diff --git a/src/event_scanner/modes/historic.rs b/src/event_scanner/modes/historic.rs index 91ed86eb..18604b87 100644 --- a/src/event_scanner/modes/historic.rs +++ b/src/event_scanner/modes/historic.rs @@ -60,10 +60,6 @@ impl HistoricScannerBuilder { } /// Adds a fallback provider (can add multiple) - /// - /// # Errors - /// - /// Will panic if the provider does not implement pubsub #[must_use] pub fn fallback_provider(mut self, provider: RootProvider) -> Self { self.block_range_scanner.fallback_providers.push(provider); diff --git a/src/event_scanner/modes/latest.rs b/src/event_scanner/modes/latest.rs index d67962a3..4a826538 100644 --- a/src/event_scanner/modes/latest.rs +++ b/src/event_scanner/modes/latest.rs @@ -79,10 +79,6 @@ impl LatestScannerBuilder { } /// Adds a fallback provider (can add multiple) - /// - /// # Errors - /// - /// Will panic if the provider does not implement pubsub #[must_use] pub fn fallback_provider(mut self, provider: RootProvider) -> Self { self.block_range_scanner.fallback_providers.push(provider); diff --git a/src/event_scanner/modes/live.rs b/src/event_scanner/modes/live.rs index e42c3e7f..8d94e44c 100644 --- a/src/event_scanner/modes/live.rs +++ b/src/event_scanner/modes/live.rs @@ -53,6 +53,7 @@ impl LiveScannerBuilder { self } + /// Adds a fallback provider (can add multiple) #[must_use] pub fn fallback_provider(mut self, provider: RootProvider) -> Self { self.block_range_scanner.fallback_providers.push(provider); diff --git a/src/event_scanner/modes/sync.rs b/src/event_scanner/modes/sync.rs index 7c65a98e..b50637f3 100644 --- a/src/event_scanner/modes/sync.rs +++ b/src/event_scanner/modes/sync.rs @@ -63,10 +63,6 @@ impl SyncScannerBuilder { } /// Adds a fallback provider (can add multiple) - /// - /// # Errors - /// - /// Will panic if the provider does not implement pubsub #[must_use] pub fn fallback_provider(mut self, provider: RootProvider) -> Self { self.block_range_scanner.fallback_providers.push(provider); From 63cefb9db3a9ac37504a3f7120527c66f89397d6 Mon Sep 17 00:00:00 2001 From: Leo Date: Wed, 29 Oct 2025 23:26:23 +0900 Subject: [PATCH 061/122] feat: merge changes --- Cargo.lock | 24 +++++++++++++++++++ Cargo.toml | 1 + src/block_range_scanner.rs | 15 +++--------- src/event_scanner/scanner/common.rs | 2 +- 
src/event_scanner/scanner/sync/from_latest.rs | 9 ++----- 5 files changed, 31 insertions(+), 20 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 6e7f81ec..346f80e7 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1084,6 +1084,17 @@ version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c08606f8c3cbf4ce6ec8e28fb0014a2c086708fe954eaa885384a6165172e7e8" +[[package]] +name = "backon" +version = "1.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cffb0e931875b666fc4fcb20fee52e9bbd1ef836fd9e9e04ec21555f9f85f7ef" +dependencies = [ + "fastrand", + "gloo-timers", + "tokio", +] + [[package]] name = "backtrace" version = "0.3.75" @@ -1667,6 +1678,7 @@ dependencies = [ "alloy-node-bindings", "anyhow", "async-trait", + "backon", "chrono", "serde", "serde_json", @@ -1917,6 +1929,18 @@ version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0cc23270f6e1808e30a928bdc84dea0b9b4136a8bc82338574f23baf47bbd280" +[[package]] +name = "gloo-timers" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbb143cf96099802033e0d4f4963b19fd2e0b728bcf076cd9cf7f6634f092994" +dependencies = [ + "futures-channel", + "futures-core", + "js-sys", + "wasm-bindgen", +] + [[package]] name = "group" version = "0.13.0" diff --git a/Cargo.toml b/Cargo.toml index 388fc462..71f1bf85 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -68,6 +68,7 @@ chrono.workspace = true alloy-node-bindings.workspace = true tokio-stream.workspace = true tracing.workspace = true +backon.workspace = true [dev-dependencies] tracing-subscriber.workspace = true diff --git a/src/block_range_scanner.rs b/src/block_range_scanner.rs index dd4c7a99..e1064152 100644 --- a/src/block_range_scanner.rs +++ b/src/block_range_scanner.rs @@ -424,23 +424,14 @@ impl Service { let get_start_block = async || -> Result { let block = match start_height { BlockNumberOrTag::Number(num) => num, - block_tag => provider - .get_block_by_number(block_tag) - .await? - .ok_or_else(|| ScannerError::BlockNotFound(block_tag))? - .header() - .number(), + block_tag => provider.get_block_by_number(block_tag).await?.header().number(), }; Ok(block) }; let get_latest_block = async || -> Result { - let block = provider - .get_block_by_number(BlockNumberOrTag::Latest) - .await? - .ok_or_else(|| ScannerError::BlockNotFound(BlockNumberOrTag::Latest))? - .header() - .number(); + let block = + provider.get_block_by_number(BlockNumberOrTag::Latest).await?.header().number(); Ok(block) }; diff --git a/src/event_scanner/scanner/common.rs b/src/event_scanner/scanner/common.rs index 5dd10a65..6bae3d9f 100644 --- a/src/event_scanner/scanner/common.rs +++ b/src/event_scanner/scanner/common.rs @@ -47,7 +47,7 @@ pub enum ConsumerMode { /// Assumes it is running in a separate tokio task, so as to be non-blocking. 
pub async fn handle_stream + Unpin>( mut stream: S, - provider: &RootProvider, + provider: &RobustProvider, listeners: &[EventListener], mode: ConsumerMode, ) { diff --git a/src/event_scanner/scanner/sync/from_latest.rs b/src/event_scanner/scanner/sync/from_latest.rs index 91626c18..e266b699 100644 --- a/src/event_scanner/scanner/sync/from_latest.rs +++ b/src/event_scanner/scanner/sync/from_latest.rs @@ -2,7 +2,6 @@ use alloy::{ consensus::BlockHeader, eips::BlockNumberOrTag, network::{BlockResponse, Network}, - providers::Provider, }; use tokio::sync::mpsc; @@ -57,12 +56,8 @@ impl EventScanner { // This is used to determine the starting point for the rewind stream and the live // stream. We do this before starting the streams to avoid a race condition // where the latest block changes while we're setting up the streams. - let latest_block = provider - .get_block_by_number(BlockNumberOrTag::Latest) - .await? - .ok_or(ScannerError::BlockNotFound(BlockNumberOrTag::Latest))? - .header() - .number(); + let latest_block = + provider.get_block_by_number(BlockNumberOrTag::Latest).await?.header().number(); // Setup rewind and live streams to run in parallel. let rewind_stream = client.rewind(BlockNumberOrTag::Earliest, latest_block).await?; From 5df42d336ef68a36ee4fd8c8e7e31ef8eddc8806 Mon Sep 17 00:00:00 2001 From: Leo Date: Thu, 30 Oct 2025 17:42:16 +0900 Subject: [PATCH 062/122] chore: add back fallback provider to scanners --- src/event_scanner/scanner/historic.rs | 11 ++++++-- src/event_scanner/scanner/latest.rs | 11 ++++++-- src/event_scanner/scanner/live.rs | 9 ++++++- src/event_scanner/scanner/mod.rs | 25 ++++++++----------- src/event_scanner/scanner/sync/from_block.rs | 15 ++++++++--- src/event_scanner/scanner/sync/from_latest.rs | 14 ++++++++--- src/event_scanner/scanner/sync/mod.rs | 12 ++++----- 7 files changed, 65 insertions(+), 32 deletions(-) diff --git a/src/event_scanner/scanner/historic.rs b/src/event_scanner/scanner/historic.rs index 8e770d53..f7fa71dc 100644 --- a/src/event_scanner/scanner/historic.rs +++ b/src/event_scanner/scanner/historic.rs @@ -1,4 +1,4 @@ -use alloy::{eips::BlockNumberOrTag, network::Network}; +use alloy::{eips::BlockNumberOrTag, network::Network, providers::RootProvider}; use super::common::{ConsumerMode, handle_stream}; use crate::{ @@ -6,7 +6,14 @@ use crate::{ event_scanner::scanner::{EventScanner, Historic}, }; -impl EventScannerBuilder { +impl EventScannerBuilder { + /// Adds a fallback provider (can add multiple) + #[must_use] + pub fn fallback_provider(mut self, provider: RootProvider) -> Self { + self.block_range_scanner.fallback_providers.push(provider); + self + } + #[must_use] pub fn max_block_range(mut self, max_block_range: u64) -> Self { self.block_range_scanner.max_block_range = max_block_range; diff --git a/src/event_scanner/scanner/latest.rs b/src/event_scanner/scanner/latest.rs index 9ba8552f..ff5602fd 100644 --- a/src/event_scanner/scanner/latest.rs +++ b/src/event_scanner/scanner/latest.rs @@ -1,4 +1,4 @@ -use alloy::{eips::BlockNumberOrTag, network::Network}; +use alloy::{eips::BlockNumberOrTag, network::Network, providers::RootProvider}; use super::common::{ConsumerMode, handle_stream}; use crate::{ @@ -6,7 +6,14 @@ use crate::{ event_scanner::{EventScanner, LatestEvents}, }; -impl EventScannerBuilder { +impl EventScannerBuilder { + /// Adds a fallback provider (can add multiple) + #[must_use] + pub fn fallback_provider(mut self, provider: RootProvider) -> Self { + self.block_range_scanner.fallback_providers.push(provider); + self + 
} + #[must_use] pub fn max_block_range(mut self, max_block_range: u64) -> Self { self.block_range_scanner.max_block_range = max_block_range; diff --git a/src/event_scanner/scanner/live.rs b/src/event_scanner/scanner/live.rs index 3e67aef1..5d4e4974 100644 --- a/src/event_scanner/scanner/live.rs +++ b/src/event_scanner/scanner/live.rs @@ -6,7 +6,14 @@ use crate::{ event_scanner::{EventScanner, scanner::Live}, }; -impl EventScannerBuilder { +impl EventScannerBuilder { + /// Adds a fallback provider (can add multiple) + #[must_use] + pub fn fallback_provider(mut self, provider: RootProvider) -> Self { + self.block_range_scanner.fallback_providers.push(provider); + self + } + #[must_use] pub fn max_block_range(mut self, max_block_range: u64) -> Self { self.block_range_scanner.max_block_range = max_block_range; diff --git a/src/event_scanner/scanner/mod.rs b/src/event_scanner/scanner/mod.rs index 04a669f8..f958708a 100644 --- a/src/event_scanner/scanner/mod.rs +++ b/src/event_scanner/scanner/mod.rs @@ -326,7 +326,7 @@ impl EventScannerBuilder { } } -impl EventScannerBuilder { +impl EventScannerBuilder { #[must_use] pub fn new(count: usize) -> Self { Self { @@ -341,7 +341,7 @@ impl EventScannerBuilder { } } -impl EventScannerBuilder { +impl EventScannerBuilder { #[must_use] pub fn new(count: usize) -> Self { Self { @@ -354,7 +354,7 @@ impl EventScannerBuilder { } } -impl EventScannerBuilder { +impl EventScannerBuilder { #[must_use] pub fn new(from_block: BlockNumberOrTag) -> Self { Self { @@ -364,7 +364,7 @@ impl EventScannerBuilder { } } -impl EventScannerBuilder { +impl EventScannerBuilder { /// Connects to the provider via WebSocket. /// /// Final builder method: consumes the builder and returns the built [`HistoricEventScanner`]. @@ -372,8 +372,8 @@ impl EventScannerBuilder { /// # Errors /// /// Returns an error if the connection fails - pub async fn connect_ws(self, ws_url: Url) -> TransportResult> { - let block_range_scanner = self.block_range_scanner.connect_ws::(ws_url).await?; + pub async fn connect_ws(self, ws_url: Url) -> TransportResult> { + let block_range_scanner = self.block_range_scanner.connect_ws(ws_url).await?; Ok(EventScanner { config: self.config, block_range_scanner, listeners: Vec::new() }) } @@ -384,11 +384,8 @@ impl EventScannerBuilder { /// # Errors /// /// Returns an error if the connection fails - pub async fn connect_ipc( - self, - ipc_path: String, - ) -> TransportResult> { - let block_range_scanner = self.block_range_scanner.connect_ipc::(ipc_path).await?; + pub async fn connect_ipc(self, ipc_path: String) -> TransportResult> { + let block_range_scanner = self.block_range_scanner.connect_ipc(ipc_path).await?; Ok(EventScanner { config: self.config, block_range_scanner, listeners: Vec::new() }) } @@ -400,8 +397,8 @@ impl EventScannerBuilder { /// /// Returns an error if the connection fails #[must_use] - pub fn connect(self, provider: RootProvider) -> EventScanner { - let block_range_scanner = self.block_range_scanner.connect::(provider); + pub fn connect(self, provider: RootProvider) -> EventScanner { + let block_range_scanner = self.block_range_scanner.connect(provider); EventScanner { config: self.config, block_range_scanner, listeners: Vec::new() } } } @@ -423,7 +420,7 @@ mod tests { #[test] fn test_historic_scanner_config_defaults() { - let builder = EventScannerBuilder::::default(); + let builder = EventScannerBuilder::::default(); assert!(matches!(builder.config.from_block, BlockNumberOrTag::Earliest)); assert!(matches!(builder.config.to_block, 
BlockNumberOrTag::Latest)); diff --git a/src/event_scanner/scanner/sync/from_block.rs b/src/event_scanner/scanner/sync/from_block.rs index bf052311..786df558 100644 --- a/src/event_scanner/scanner/sync/from_block.rs +++ b/src/event_scanner/scanner/sync/from_block.rs @@ -1,4 +1,4 @@ -use alloy::network::Network; +use alloy::{network::Network, providers::RootProvider}; use crate::{ EventScannerBuilder, ScannerError, @@ -8,7 +8,14 @@ use crate::{ }, }; -impl EventScannerBuilder { +impl EventScannerBuilder { + /// Adds a fallback provider (can add multiple) + #[must_use] + pub fn fallback_provider(mut self, provider: RootProvider) -> Self { + self.block_range_scanner.fallback_providers.push(provider); + self + } + #[must_use] pub fn max_block_range(mut self, max_block_range: u64) -> Self { self.block_range_scanner.max_block_range = max_block_range; @@ -16,8 +23,8 @@ impl EventScannerBuilder { } #[must_use] - pub fn block_confirmations(mut self, count: u64) -> Self { - self.config.block_confirmations = count; + pub fn block_confirmations(mut self, confirmations: u64) -> Self { + self.config.block_confirmations = confirmations; self } } diff --git a/src/event_scanner/scanner/sync/from_latest.rs b/src/event_scanner/scanner/sync/from_latest.rs index e266b699..817c5906 100644 --- a/src/event_scanner/scanner/sync/from_latest.rs +++ b/src/event_scanner/scanner/sync/from_latest.rs @@ -2,6 +2,7 @@ use alloy::{ consensus::BlockHeader, eips::BlockNumberOrTag, network::{BlockResponse, Network}, + providers::RootProvider, }; use tokio::sync::mpsc; @@ -20,10 +21,17 @@ use crate::{ }, }; -impl EventScannerBuilder { +impl EventScannerBuilder { + /// Adds a fallback provider (can add multiple) #[must_use] - pub fn block_confirmations(mut self, count: u64) -> Self { - self.config.block_confirmations = count; + pub fn fallback_provider(mut self, provider: RootProvider) -> Self { + self.block_range_scanner.fallback_providers.push(provider); + self + } + + #[must_use] + pub fn block_confirmations(mut self, confirmations: u64) -> Self { + self.config.block_confirmations = confirmations; self } } diff --git a/src/event_scanner/scanner/sync/mod.rs b/src/event_scanner/scanner/sync/mod.rs index 6701752e..b01e6aa3 100644 --- a/src/event_scanner/scanner/sync/mod.rs +++ b/src/event_scanner/scanner/sync/mod.rs @@ -1,4 +1,4 @@ -use alloy::eips::BlockNumberOrTag; +use alloy::{eips::BlockNumberOrTag, network::Network}; pub(crate) mod from_block; pub(crate) mod from_latest; @@ -8,7 +8,7 @@ use crate::{ event_scanner::scanner::{SyncFromBlock, SyncFromLatestEvents, Synchronize}, }; -impl EventScannerBuilder { +impl EventScannerBuilder { /// Scans the latest `count` matching events per registered listener, then automatically /// transitions to live streaming mode. 
/// @@ -101,8 +101,8 @@ impl EventScannerBuilder { /// [reorg]: crate::types::ScannerStatus::ReorgDetected /// [switch_to_live]: crate::types::ScannerStatus::SwitchingToLive #[must_use] - pub fn from_latest(self, count: usize) -> EventScannerBuilder { - EventScannerBuilder::::new(count) + pub fn from_latest(self, count: usize) -> EventScannerBuilder { + EventScannerBuilder::::new(count) } /// Streams events from a specific starting block to the present, then automatically @@ -209,7 +209,7 @@ impl EventScannerBuilder { pub fn from_block( self, block: impl Into, - ) -> EventScannerBuilder { - EventScannerBuilder::::new(block.into()) + ) -> EventScannerBuilder { + EventScannerBuilder::::new(block.into()) } } From 4a63f7ea67bf51769bc5be670eb8bed42c21aea2 Mon Sep 17 00:00:00 2001 From: Leo Date: Thu, 30 Oct 2025 18:28:22 +0900 Subject: [PATCH 063/122] feat: update block range scanner to use url for fallback --- src/block_range_scanner.rs | 197 ++++++++++++++++++++++++++----------- 1 file changed, 138 insertions(+), 59 deletions(-) diff --git a/src/block_range_scanner.rs b/src/block_range_scanner.rs index 68d90001..07f23a8d 100644 --- a/src/block_range_scanner.rs +++ b/src/block_range_scanner.rs @@ -83,7 +83,8 @@ use alloy::{ pubsub::Subscription, rpc::client::ClientBuilder, transports::{ - RpcError, TransportErrorKind, TransportResult, http::reqwest::Url, ws::WsConnect, + RpcError, TransportErrorKind, TransportResult, http::reqwest::Url, ipc::IpcConnect, + ws::WsConnect, }, }; use tracing::{debug, error, info, warn}; @@ -131,21 +132,22 @@ impl From for Message { } #[derive(Clone)] -pub struct BlockRangeScanner { +pub struct BlockRangeScanner { pub max_block_range: u64, pub max_timeout: Duration, pub max_retries: usize, pub retry_interval: Duration, - pub fallback_providers: Vec>, + pub fallback_ws_urls: Vec, + pub fallback_ipc_paths: Vec, } -impl Default for BlockRangeScanner { +impl Default for BlockRangeScanner { fn default() -> Self { Self::new() } } -impl BlockRangeScanner { +impl BlockRangeScanner { #[must_use] pub fn new() -> Self { Self { @@ -153,7 +155,8 @@ impl BlockRangeScanner { max_timeout: DEFAULT_MAX_TIMEOUT, max_retries: DEFAULT_MAX_RETRIES, retry_interval: DEFAULT_RETRY_INTERVAL, - fallback_providers: Vec::new(), + fallback_ws_urls: Vec::new(), + fallback_ipc_paths: Vec::new(), } } @@ -181,50 +184,126 @@ impl BlockRangeScanner { self } - /// Adds a fallback provider to the block range scanner + /// Adds a fallback WebSocket URL to the block range scanner + /// + /// The WebSocket connection will be established when calling the `connect` methods + #[must_use] + pub fn fallback_ws(mut self, url: Url) -> Self { + self.fallback_ws_urls.push(url); + self + } + + /// Adds a fallback IPC path to the block range scanner + /// + /// The IPC connection will be established when calling the `connect` methods #[must_use] - pub fn fallback_provider(mut self, provider: RootProvider) -> Self { - self.fallback_providers.push(provider); + pub fn fallback_ipc(mut self, path: String) -> Self { + self.fallback_ipc_paths.push(path); self } /// Connects to the provider via WebSocket /// + /// This method establishes the primary WebSocket connection and all configured fallback + /// connections (both WebSocket and IPC). 
+ /// /// # Errors /// - /// Returns an error if the connection fails - pub async fn connect_ws(self, ws_url: Url) -> TransportResult> { + /// Returns an error if the primary connection fails + pub async fn connect_ws( + self, + ws_url: Url, + ) -> TransportResult> { let provider = RootProvider::::new(ClientBuilder::default().ws(WsConnect::new(ws_url)).await?); - Ok(self.connect(provider)) + + let fallback_providers = self.connect_all_fallbacks::().await; + + Ok(self.connect_with_fallbacks(provider, fallback_providers)) } /// Connects to the provider via IPC /// + /// This method establishes the primary IPC connection and all configured fallback + /// connections (both WebSocket and IPC). + /// /// # Errors /// - /// Returns an error if the connection fails - pub async fn connect_ipc( + /// Returns an error if the primary connection fails + pub async fn connect_ipc( self, ipc_path: String, ) -> Result, RpcError> { - let provider = RootProvider::::new(ClientBuilder::default().ipc(ipc_path.into()).await?); - Ok(self.connect(provider)) - } + let provider = + RootProvider::::new(ClientBuilder::default().ipc(IpcConnect::new(ipc_path)).await?); - // pub fn fallback_provider(self, provider: RootProvider) -> Self {} + let fallback_providers = self.connect_all_fallbacks::().await; + + Ok(self.connect_with_fallbacks(provider, fallback_providers)) + } /// Connects to an existing provider + #[must_use] + pub async fn connect( + self, + provider: RootProvider, + ) -> ConnectedBlockRangeScanner { + let fallback_providers = self.connect_all_fallbacks::().await; + self.connect_with_fallbacks(provider, fallback_providers) + } + + /// Establishes connections to all configured fallback providers (both WebSocket and IPC). /// - /// # Errors - /// - /// Returns an error if the connection fails + /// Logs warnings for any fallback connections that fail, but continues attempting + /// to connect to remaining fallbacks. 
+    async fn connect_all_fallbacks<N: Network>(&self) -> Vec<RootProvider<N>> {
+        let mut fallback_providers = Vec::new();
+
+        // Connect to WebSocket fallbacks
+        for url in &self.fallback_ws_urls {
+            match ClientBuilder::default().ws(WsConnect::new(url.clone())).await {
+                Ok(client) => {
+                    fallback_providers.push(RootProvider::<N>::new(client));
+                    info!("Successfully connected to fallback WebSocket: {}", url);
+                }
+                Err(e) => {
+                    warn!("Failed to connect to fallback WebSocket {}: {}", url, e);
+                }
+            }
+        }
+
+        // Connect to IPC fallbacks
+        for path in &self.fallback_ipc_paths {
+            match ClientBuilder::default().ipc(IpcConnect::new(path.clone())).await {
+                Ok(client) => {
+                    fallback_providers.push(RootProvider::<N>::new(client));
+                    info!("Successfully connected to fallback IPC: {}", path);
+                }
+                Err(e) => {
+                    warn!("Failed to connect to fallback IPC {}: {}", path, e);
+                }
+            }
+        }
+
+        fallback_providers
+    }
+
+    /// Connects to an existing provider with fallback providers
     #[must_use]
-    pub fn connect(self, provider: RootProvider<N>) -> ConnectedBlockRangeScanner<N> {
-        let robust_provider = RobustProvider::new(provider)
+    fn connect_with_fallbacks<N: Network>(
+        self,
+        provider: RootProvider<N>,
+        fallback_providers: Vec<RootProvider<N>>,
+    ) -> ConnectedBlockRangeScanner<N> {
+        let mut robust_provider = RobustProvider::new(provider)
             .max_timeout(self.max_timeout)
             .max_retries(self.max_retries)
             .retry_interval(self.retry_interval);
+
+        for fallback in fallback_providers {
+            robust_provider = robust_provider.fallback_provider(fallback);
+        }
+
         ConnectedBlockRangeScanner {
             provider: robust_provider,
             max_block_range: self.max_block_range,
@@ -954,7 +1033,7 @@ mod tests {
 
     #[test]
     fn block_range_scanner_defaults_match_constants() {
-        let scanner = BlockRangeScanner::<Ethereum>::new();
+        let scanner = BlockRangeScanner::new();
 
         assert_eq!(scanner.max_block_range, DEFAULT_MAX_BLOCK_RANGE);
     }
@@ -963,7 +1042,7 @@
     fn builder_methods_update_configuration() {
         let max_block_range = 42;
 
-        let scanner = BlockRangeScanner::<Ethereum>::new().max_block_range(max_block_range);
+        let scanner = BlockRangeScanner::new().max_block_range(max_block_range);
 
         assert_eq!(scanner.max_block_range, max_block_range);
     }
@@ -975,8 +1054,8 @@
 
         // --- Zero block confirmations -> stream immediately ---
 
-        let client = BlockRangeScanner::<Ethereum>::new()
-            .connect_ws(anvil.ws_endpoint_url())
+        let client = BlockRangeScanner::new()
+            .connect_ws::<Ethereum>(anvil.ws_endpoint_url())
             .await?
.run()?; @@ -1207,9 +1286,9 @@ mod tests { let end_num = 120; - let client = BlockRangeScanner::::new() + let client = BlockRangeScanner::new() .max_block_range(30) - .connect_ws(anvil.ws_endpoint_url()) + .connect_ws::(anvil.ws_endpoint_url()) .await? .run()?; @@ -1242,9 +1321,9 @@ mod tests { provider.anvil_mine(Some(100), None).await?; - let client = BlockRangeScanner::::new() + let client = BlockRangeScanner::new() .max_block_range(5) - .connect_ws(anvil.ws_endpoint_url()) + .connect_ws::(anvil.ws_endpoint_url()) .await? .run()?; @@ -1273,9 +1352,9 @@ mod tests { assert_closed!(stream); // range where blocks per epoch is larger than the number of blocks on chain - let client = BlockRangeScanner::::new() + let client = BlockRangeScanner::new() .max_block_range(200) - .connect_ws(anvil.ws_endpoint_url()) + .connect_ws::(anvil.ws_endpoint_url()) .await? .run()?; @@ -1297,9 +1376,9 @@ mod tests { let provider = ProviderBuilder::new().connect(anvil.endpoint().as_str()).await?; provider.anvil_mine(Some(11), None).await?; - let client = BlockRangeScanner::::new() + let client = BlockRangeScanner::new() .max_block_range(5) - .connect_ws(anvil.ws_endpoint_url()) + .connect_ws::(anvil.ws_endpoint_url()) .await? .run()?; @@ -1416,9 +1495,9 @@ mod tests { provider.anvil_mine(Some(150), None).await?; - let client = BlockRangeScanner::::new() + let client = BlockRangeScanner::new() .max_block_range(100) - .connect_ws(anvil.ws_endpoint_url()) + .connect_ws::(anvil.ws_endpoint_url()) .await? .run()?; @@ -1440,9 +1519,9 @@ mod tests { provider.anvil_mine(Some(15), None).await?; - let client = BlockRangeScanner::::new() + let client = BlockRangeScanner::new() .max_block_range(5) - .connect_ws(anvil.ws_endpoint_url()) + .connect_ws::(anvil.ws_endpoint_url()) .await? .run()?; @@ -1465,9 +1544,9 @@ mod tests { provider.anvil_mine(Some(15), None).await?; - let client = BlockRangeScanner::::new() + let client = BlockRangeScanner::new() .max_block_range(4) - .connect_ws(anvil.ws_endpoint_url()) + .connect_ws::(anvil.ws_endpoint_url()) .await? .run()?; @@ -1490,9 +1569,9 @@ mod tests { provider.anvil_mine(Some(15), None).await?; - let client = BlockRangeScanner::::new() + let client = BlockRangeScanner::new() .max_block_range(5) - .connect_ws(anvil.ws_endpoint_url()) + .connect_ws::(anvil.ws_endpoint_url()) .await? .run()?; @@ -1512,9 +1591,9 @@ mod tests { provider.anvil_mine(Some(15), None).await?; - let client = BlockRangeScanner::::new() + let client = BlockRangeScanner::new() .max_block_range(1) - .connect_ws(anvil.ws_endpoint_url()) + .connect_ws::(anvil.ws_endpoint_url()) .await? .run()?; @@ -1538,9 +1617,9 @@ mod tests { // Mine 20 blocks, so the total number of blocks is 21 (including 0th block) provider.anvil_mine(Some(20), None).await?; - let client = BlockRangeScanner::::new() + let client = BlockRangeScanner::new() .max_block_range(7) - .connect_ws(anvil.ws_endpoint_url()) + .connect_ws::(anvil.ws_endpoint_url()) .await? .run()?; @@ -1563,9 +1642,9 @@ mod tests { // Ensure blocks at 3 and 15 exist provider.anvil_mine(Some(16), None).await?; - let client = BlockRangeScanner::::new() + let client = BlockRangeScanner::new() .max_block_range(5) - .connect_ws(anvil.ws_endpoint_url()) + .connect_ws::(anvil.ws_endpoint_url()) .await? 
.run()?; @@ -1591,9 +1670,9 @@ mod tests { let anvil = Anvil::new().try_spawn()?; // Do not mine up to 999 so start won't exist - let client = BlockRangeScanner::::new() + let client = BlockRangeScanner::new() .max_block_range(5) - .connect_ws(anvil.ws_endpoint_url()) + .connect_ws::(anvil.ws_endpoint_url()) .await? .run()?; From 8e5a85e2aec52f7b0c0cde5e0eb96a963b400b46 Mon Sep 17 00:00:00 2001 From: Leo Date: Thu, 30 Oct 2025 18:32:11 +0900 Subject: [PATCH 064/122] feat: add errors back --- src/block_range_scanner.rs | 52 +++++++++++++++++--------------------- 1 file changed, 23 insertions(+), 29 deletions(-) diff --git a/src/block_range_scanner.rs b/src/block_range_scanner.rs index 07f23a8d..41ce29d9 100644 --- a/src/block_range_scanner.rs +++ b/src/block_range_scanner.rs @@ -217,7 +217,7 @@ impl BlockRangeScanner { let provider = RootProvider::::new(ClientBuilder::default().ws(WsConnect::new(ws_url)).await?); - let fallback_providers = self.connect_all_fallbacks::().await; + let fallback_providers = self.connect_all_fallbacks::().await?; Ok(self.connect_with_fallbacks(provider, fallback_providers)) } @@ -237,55 +237,49 @@ impl BlockRangeScanner { let provider = RootProvider::::new(ClientBuilder::default().ipc(IpcConnect::new(ipc_path)).await?); - let fallback_providers = self.connect_all_fallbacks::().await; + let fallback_providers = self.connect_all_fallbacks::().await?; Ok(self.connect_with_fallbacks(provider, fallback_providers)) } /// Connects to an existing provider + /// + /// This method also tries to connect any fallback providers (both WebSocket and IPC) + /// + /// # Errors + /// + /// Returns an error if any fallback connection fails #[must_use] pub async fn connect( self, provider: RootProvider, - ) -> ConnectedBlockRangeScanner { - let fallback_providers = self.connect_all_fallbacks::().await; - self.connect_with_fallbacks(provider, fallback_providers) + ) -> Result, RpcError> { + let fallback_providers = self.connect_all_fallbacks::().await?; + + Ok(self.connect_with_fallbacks(provider, fallback_providers)) } /// Establishes connections to all configured fallback providers (both WebSocket and IPC). /// - /// Logs warnings for any fallback connections that fail, but continues attempting - /// to connect to remaining fallbacks. 
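// A minimal, self-contained sketch of the behavioural change in the hunk
// above: the old helper logged a failed fallback connection and kept going,
// while the new one propagates the first error with `?`. `dial` is a
// hypothetical stand-in for `ClientBuilder::default().ws(..)` / `.ipc(..)`.
async fn dial(endpoint: &str) -> Result<String, std::io::Error> {
    Ok(format!("connected: {endpoint}"))
}

async fn connect_all(endpoints: &[&str]) -> Result<Vec<String>, std::io::Error> {
    let mut connections = Vec::with_capacity(endpoints.len());
    for endpoint in endpoints {
        // Fail fast: the first connection error aborts the whole setup
        // instead of being logged and skipped.
        connections.push(dial(endpoint).await?);
    }
    Ok(connections)
}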
- async fn connect_all_fallbacks(&self) -> Vec> { + /// # Errors + /// + /// Returns an error if any fallback connection fails + async fn connect_all_fallbacks( + &self, + ) -> Result>, RpcError> { let mut fallback_providers = Vec::new(); - // Connect to WebSocket fallbacks for url in &self.fallback_ws_urls { - match ClientBuilder::default().ws(WsConnect::new(url.clone())).await { - Ok(client) => { - fallback_providers.push(RootProvider::::new(client)); - info!("Successfully connected to fallback WebSocket: {}", url); - } - Err(e) => { - warn!("Failed to connect to fallback WebSocket {}: {}", url, e); - } - } + let client = ClientBuilder::default().ws(WsConnect::new(url.clone())).await?; + fallback_providers.push(RootProvider::::new(client)); } - // Connect to IPC fallbacks for path in &self.fallback_ipc_paths { - match ClientBuilder::default().ipc(IpcConnect::new(path.clone())).await { - Ok(client) => { - fallback_providers.push(RootProvider::::new(client)); - info!("Successfully connected to fallback IPC: {}", path); - } - Err(e) => { - warn!("Failed to connect to fallback IPC {}: {}", path, e); - } - } + let client = ClientBuilder::default().ipc(IpcConnect::new(path.clone())).await?; + fallback_providers.push(RootProvider::::new(client)); } - fallback_providers + Ok(fallback_providers) } /// Connects to an existing provider with fallback providers From 63eb7d41cf369609e779d334bd576677521c3c18 Mon Sep 17 00:00:00 2001 From: Leo Date: Thu, 30 Oct 2025 18:32:36 +0900 Subject: [PATCH 065/122] ref: remove generic network param from scanners --- src/event_scanner/scanner/mod.rs | 33 +++++++++++++++++--------------- 1 file changed, 18 insertions(+), 15 deletions(-) diff --git a/src/event_scanner/scanner/mod.rs b/src/event_scanner/scanner/mod.rs index f958708a..70e89122 100644 --- a/src/event_scanner/scanner/mod.rs +++ b/src/event_scanner/scanner/mod.rs @@ -67,12 +67,12 @@ pub struct EventScanner { } #[derive(Default)] -pub struct EventScannerBuilder { +pub struct EventScannerBuilder { pub(crate) config: M, - pub(crate) block_range_scanner: BlockRangeScanner, + pub(crate) block_range_scanner: BlockRangeScanner, } -impl EventScannerBuilder { +impl EventScannerBuilder { /// Streams events from a historical block range. /// /// # Example @@ -135,7 +135,7 @@ impl EventScannerBuilder { /// RPC call /// - **Completion**: The scanner completes when the entire range has been processed #[must_use] - pub fn historic() -> EventScannerBuilder { + pub fn historic() -> EventScannerBuilder { EventScannerBuilder::default() } @@ -326,7 +326,7 @@ impl EventScannerBuilder { } } -impl EventScannerBuilder { +impl EventScannerBuilder { #[must_use] pub fn new(count: usize) -> Self { Self { @@ -341,7 +341,7 @@ impl EventScannerBuilder { } } -impl EventScannerBuilder { +impl EventScannerBuilder { #[must_use] pub fn new(count: usize) -> Self { Self { @@ -354,7 +354,7 @@ impl EventScannerBuilder { } } -impl EventScannerBuilder { +impl EventScannerBuilder { #[must_use] pub fn new(from_block: BlockNumberOrTag) -> Self { Self { @@ -364,7 +364,7 @@ impl EventScannerBuilder { } } -impl EventScannerBuilder { +impl EventScannerBuilder { /// Connects to the provider via WebSocket. /// /// Final builder method: consumes the builder and returns the built [`HistoricEventScanner`]. 
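// A toy illustration (not crate code) of the refactor this patch performs:
// the `Network` type parameter moves off the builder struct and onto the
// final connect call, so a non-generic builder can derive `Default` and be
// configured without naming a network up front.
use std::marker::PhantomData;

#[derive(Default)]
struct Builder {
    max_block_range: u64,
}

struct Connected<N> {
    max_block_range: u64,
    _network: PhantomData<N>,
}

impl Builder {
    // The network is only fixed here, e.g. `Builder::default().connect::<Ethereum>()`.
    fn connect<N>(self) -> Connected<N> {
        Connected { max_block_range: self.max_block_range, _network: PhantomData }
    }
}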
@@ -372,8 +372,8 @@ impl EventScannerBuilder { /// # Errors /// /// Returns an error if the connection fails - pub async fn connect_ws(self, ws_url: Url) -> TransportResult> { - let block_range_scanner = self.block_range_scanner.connect_ws(ws_url).await?; + pub async fn connect_ws(self, ws_url: Url) -> TransportResult> { + let block_range_scanner = self.block_range_scanner.connect_ws::(ws_url).await?; Ok(EventScanner { config: self.config, block_range_scanner, listeners: Vec::new() }) } @@ -384,8 +384,11 @@ impl EventScannerBuilder { /// # Errors /// /// Returns an error if the connection fails - pub async fn connect_ipc(self, ipc_path: String) -> TransportResult> { - let block_range_scanner = self.block_range_scanner.connect_ipc(ipc_path).await?; + pub async fn connect_ipc( + self, + ipc_path: String, + ) -> TransportResult> { + let block_range_scanner = self.block_range_scanner.connect_ipc::(ipc_path).await?; Ok(EventScanner { config: self.config, block_range_scanner, listeners: Vec::new() }) } @@ -397,8 +400,8 @@ impl EventScannerBuilder { /// /// Returns an error if the connection fails #[must_use] - pub fn connect(self, provider: RootProvider) -> EventScanner { - let block_range_scanner = self.block_range_scanner.connect(provider); + pub fn connect(self, provider: RootProvider) -> EventScanner { + let block_range_scanner = self.block_range_scanner.connect::(provider); EventScanner { config: self.config, block_range_scanner, listeners: Vec::new() } } } @@ -420,7 +423,7 @@ mod tests { #[test] fn test_historic_scanner_config_defaults() { - let builder = EventScannerBuilder::::default(); + let builder = EventScannerBuilder::::default(); assert!(matches!(builder.config.from_block, BlockNumberOrTag::Earliest)); assert!(matches!(builder.config.to_block, BlockNumberOrTag::Latest)); From c7fdfbcd1bc9bf069a6ebd2454dfb977646aa0ce Mon Sep 17 00:00:00 2001 From: Leo Date: Thu, 30 Oct 2025 18:36:57 +0900 Subject: [PATCH 066/122] fix: update tests --- src/event_scanner/scanner/mod.rs | 25 ++++++++++++++++--------- 1 file changed, 16 insertions(+), 9 deletions(-) diff --git a/src/event_scanner/scanner/mod.rs b/src/event_scanner/scanner/mod.rs index 70e89122..3d660c08 100644 --- a/src/event_scanner/scanner/mod.rs +++ b/src/event_scanner/scanner/mod.rs @@ -400,9 +400,12 @@ impl EventScannerBuilder { /// /// Returns an error if the connection fails #[must_use] - pub fn connect(self, provider: RootProvider) -> EventScanner { - let block_range_scanner = self.block_range_scanner.connect::(provider); - EventScanner { config: self.config, block_range_scanner, listeners: Vec::new() } + pub async fn connect( + self, + provider: RootProvider, + ) -> TransportResult> { + let block_range_scanner = self.block_range_scanner.connect::(provider).await?; + Ok(EventScanner { config: self.config, block_range_scanner, listeners: Vec::new() }) } } @@ -454,10 +457,10 @@ mod tests { assert_eq!(builder.config.block_confirmations, DEFAULT_BLOCK_CONFIRMATIONS); } - #[test] - fn test_historic_event_stream_listeners_vector_updates() { + #[tokio::test] + async fn test_historic_event_stream_listeners_vector_updates() -> anyhow::Result<()> { let provider = RootProvider::::new(RpcClient::mocked(Asserter::new())); - let mut scanner = EventScannerBuilder::historic().connect::(provider); + let mut scanner = EventScannerBuilder::historic().connect::(provider).await?; assert!(scanner.listeners.is_empty()); @@ -467,16 +470,20 @@ mod tests { let _stream2 = scanner.subscribe(EventFilter::new()); let _stream3 = 
scanner.subscribe(EventFilter::new()); assert_eq!(scanner.listeners.len(), 3); + + Ok(()) } - #[test] - fn test_historic_event_stream_channel_capacity() { + #[tokio::test] + async fn test_historic_event_stream_channel_capacity() -> anyhow::Result<()> { let provider = RootProvider::::new(RpcClient::mocked(Asserter::new())); - let mut scanner = EventScannerBuilder::historic().connect::(provider); + let mut scanner = EventScannerBuilder::historic().connect::(provider).await?; let _ = scanner.subscribe(EventFilter::new()); let sender = &scanner.listeners[0].sender; assert_eq!(sender.capacity(), MAX_BUFFERED_MESSAGES); + + Ok(()) } } From 6bab7246f0878b73ad6397c3cdbc564ee5d90945 Mon Sep 17 00:00:00 2001 From: Leo Date: Thu, 30 Oct 2025 18:44:53 +0900 Subject: [PATCH 067/122] ref: remove generic Network param from builder --- src/block_range_scanner.rs | 1 - src/event_scanner/scanner/historic.rs | 11 ++--------- src/event_scanner/scanner/latest.rs | 11 ++--------- src/event_scanner/scanner/live.rs | 9 +-------- src/event_scanner/scanner/mod.rs | 1 - src/event_scanner/scanner/sync/from_block.rs | 11 ++--------- src/event_scanner/scanner/sync/from_latest.rs | 10 +--------- src/event_scanner/scanner/sync/mod.rs | 12 ++++++------ tests/latest_events/basic.rs | 6 +++--- 9 files changed, 17 insertions(+), 55 deletions(-) diff --git a/src/block_range_scanner.rs b/src/block_range_scanner.rs index 41ce29d9..31c7a557 100644 --- a/src/block_range_scanner.rs +++ b/src/block_range_scanner.rs @@ -249,7 +249,6 @@ impl BlockRangeScanner { /// # Errors /// /// Returns an error if any fallback connection fails - #[must_use] pub async fn connect( self, provider: RootProvider, diff --git a/src/event_scanner/scanner/historic.rs b/src/event_scanner/scanner/historic.rs index f7fa71dc..8e770d53 100644 --- a/src/event_scanner/scanner/historic.rs +++ b/src/event_scanner/scanner/historic.rs @@ -1,4 +1,4 @@ -use alloy::{eips::BlockNumberOrTag, network::Network, providers::RootProvider}; +use alloy::{eips::BlockNumberOrTag, network::Network}; use super::common::{ConsumerMode, handle_stream}; use crate::{ @@ -6,14 +6,7 @@ use crate::{ event_scanner::scanner::{EventScanner, Historic}, }; -impl EventScannerBuilder { - /// Adds a fallback provider (can add multiple) - #[must_use] - pub fn fallback_provider(mut self, provider: RootProvider) -> Self { - self.block_range_scanner.fallback_providers.push(provider); - self - } - +impl EventScannerBuilder { #[must_use] pub fn max_block_range(mut self, max_block_range: u64) -> Self { self.block_range_scanner.max_block_range = max_block_range; diff --git a/src/event_scanner/scanner/latest.rs b/src/event_scanner/scanner/latest.rs index ff5602fd..9ba8552f 100644 --- a/src/event_scanner/scanner/latest.rs +++ b/src/event_scanner/scanner/latest.rs @@ -1,4 +1,4 @@ -use alloy::{eips::BlockNumberOrTag, network::Network, providers::RootProvider}; +use alloy::{eips::BlockNumberOrTag, network::Network}; use super::common::{ConsumerMode, handle_stream}; use crate::{ @@ -6,14 +6,7 @@ use crate::{ event_scanner::{EventScanner, LatestEvents}, }; -impl EventScannerBuilder { - /// Adds a fallback provider (can add multiple) - #[must_use] - pub fn fallback_provider(mut self, provider: RootProvider) -> Self { - self.block_range_scanner.fallback_providers.push(provider); - self - } - +impl EventScannerBuilder { #[must_use] pub fn max_block_range(mut self, max_block_range: u64) -> Self { self.block_range_scanner.max_block_range = max_block_range; diff --git a/src/event_scanner/scanner/live.rs 
b/src/event_scanner/scanner/live.rs index 5d4e4974..3e67aef1 100644 --- a/src/event_scanner/scanner/live.rs +++ b/src/event_scanner/scanner/live.rs @@ -6,14 +6,7 @@ use crate::{ event_scanner::{EventScanner, scanner::Live}, }; -impl EventScannerBuilder { - /// Adds a fallback provider (can add multiple) - #[must_use] - pub fn fallback_provider(mut self, provider: RootProvider) -> Self { - self.block_range_scanner.fallback_providers.push(provider); - self - } - +impl EventScannerBuilder { #[must_use] pub fn max_block_range(mut self, max_block_range: u64) -> Self { self.block_range_scanner.max_block_range = max_block_range; diff --git a/src/event_scanner/scanner/mod.rs b/src/event_scanner/scanner/mod.rs index 3d660c08..dfa2b9f7 100644 --- a/src/event_scanner/scanner/mod.rs +++ b/src/event_scanner/scanner/mod.rs @@ -399,7 +399,6 @@ impl EventScannerBuilder { /// # Errors /// /// Returns an error if the connection fails - #[must_use] pub async fn connect( self, provider: RootProvider, diff --git a/src/event_scanner/scanner/sync/from_block.rs b/src/event_scanner/scanner/sync/from_block.rs index 786df558..40b58dd8 100644 --- a/src/event_scanner/scanner/sync/from_block.rs +++ b/src/event_scanner/scanner/sync/from_block.rs @@ -1,4 +1,4 @@ -use alloy::{network::Network, providers::RootProvider}; +use alloy::network::Network; use crate::{ EventScannerBuilder, ScannerError, @@ -8,14 +8,7 @@ use crate::{ }, }; -impl EventScannerBuilder { - /// Adds a fallback provider (can add multiple) - #[must_use] - pub fn fallback_provider(mut self, provider: RootProvider) -> Self { - self.block_range_scanner.fallback_providers.push(provider); - self - } - +impl EventScannerBuilder { #[must_use] pub fn max_block_range(mut self, max_block_range: u64) -> Self { self.block_range_scanner.max_block_range = max_block_range; diff --git a/src/event_scanner/scanner/sync/from_latest.rs b/src/event_scanner/scanner/sync/from_latest.rs index 817c5906..27753718 100644 --- a/src/event_scanner/scanner/sync/from_latest.rs +++ b/src/event_scanner/scanner/sync/from_latest.rs @@ -2,7 +2,6 @@ use alloy::{ consensus::BlockHeader, eips::BlockNumberOrTag, network::{BlockResponse, Network}, - providers::RootProvider, }; use tokio::sync::mpsc; @@ -21,14 +20,7 @@ use crate::{ }, }; -impl EventScannerBuilder { - /// Adds a fallback provider (can add multiple) - #[must_use] - pub fn fallback_provider(mut self, provider: RootProvider) -> Self { - self.block_range_scanner.fallback_providers.push(provider); - self - } - +impl EventScannerBuilder { #[must_use] pub fn block_confirmations(mut self, confirmations: u64) -> Self { self.config.block_confirmations = confirmations; diff --git a/src/event_scanner/scanner/sync/mod.rs b/src/event_scanner/scanner/sync/mod.rs index b01e6aa3..6701752e 100644 --- a/src/event_scanner/scanner/sync/mod.rs +++ b/src/event_scanner/scanner/sync/mod.rs @@ -1,4 +1,4 @@ -use alloy::{eips::BlockNumberOrTag, network::Network}; +use alloy::eips::BlockNumberOrTag; pub(crate) mod from_block; pub(crate) mod from_latest; @@ -8,7 +8,7 @@ use crate::{ event_scanner::scanner::{SyncFromBlock, SyncFromLatestEvents, Synchronize}, }; -impl EventScannerBuilder { +impl EventScannerBuilder { /// Scans the latest `count` matching events per registered listener, then automatically /// transitions to live streaming mode. 
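// Usage sketch for the sync-from-latest mode documented above, using the API
// as it stands at this point in the series; `ws_url` and `filter` are assumed
// to be supplied by the caller.
use alloy::{network::Ethereum, transports::http::reqwest::Url};
use event_scanner::{EventFilter, EventScannerBuilder};

async fn sync_then_live(ws_url: Url, filter: EventFilter) -> anyhow::Result<()> {
    let mut scanner = EventScannerBuilder::sync()
        .from_latest(5) // replay the 5 most recent matching events per listener...
        .connect_ws::<Ethereum>(ws_url)
        .await?;
    let _stream = scanner.subscribe(filter); // ...then continue in live mode
    scanner.start().await?;
    Ok(())
}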
/// @@ -101,8 +101,8 @@ impl EventScannerBuilder { /// [reorg]: crate::types::ScannerStatus::ReorgDetected /// [switch_to_live]: crate::types::ScannerStatus::SwitchingToLive #[must_use] - pub fn from_latest(self, count: usize) -> EventScannerBuilder { - EventScannerBuilder::::new(count) + pub fn from_latest(self, count: usize) -> EventScannerBuilder { + EventScannerBuilder::::new(count) } /// Streams events from a specific starting block to the present, then automatically @@ -209,7 +209,7 @@ impl EventScannerBuilder { pub fn from_block( self, block: impl Into, - ) -> EventScannerBuilder { - EventScannerBuilder::::new(block.into()) + ) -> EventScannerBuilder { + EventScannerBuilder::::new(block.into()) } } diff --git a/tests/latest_events/basic.rs b/tests/latest_events/basic.rs index e789cdf5..05235c00 100644 --- a/tests/latest_events/basic.rs +++ b/tests/latest_events/basic.rs @@ -107,7 +107,7 @@ async fn latest_scanner_respects_range_subset() -> anyhow::Result<()> { let mut scanner_with_range = EventScannerBuilder::latest(10) .from_block(start) .to_block(end) - .connect_ws(anvil.ws_endpoint_url()) + .connect_ws::(anvil.ws_endpoint_url()) .await?; let mut stream_with_range = scanner_with_range.subscribe(default_filter); @@ -303,7 +303,7 @@ async fn latest_scanner_large_gaps_and_empty_ranges() -> anyhow::Result<()> { let mut scanner_with_range = EventScannerBuilder::latest(5) .from_block(start) .to_block(end) - .connect_ws(anvil.ws_endpoint_url()) + .connect_ws::(anvil.ws_endpoint_url()) .await?; let mut stream_with_range = scanner_with_range.subscribe(default_filter); @@ -336,7 +336,7 @@ async fn latest_scanner_boundary_range_single_block() -> anyhow::Result<()> { let mut scanner_with_range = EventScannerBuilder::latest(5) .from_block(start) .to_block(end) - .connect_ws(anvil.ws_endpoint_url()) + .connect_ws::(anvil.ws_endpoint_url()) .await?; let mut stream_with_range = scanner_with_range.subscribe(default_filter); From 77eabd54800947e3d9d81bafe5fc49f8980cea63 Mon Sep 17 00:00:00 2001 From: Leo Date: Thu, 30 Oct 2025 19:00:25 +0900 Subject: [PATCH 068/122] feat: add fallback provider logic to event scanners --- src/block_range_scanner.rs | 61 +++++++++-------- src/event_scanner/scanner/mod.rs | 110 +++++++++++++++++++++++++++++-- 2 files changed, 140 insertions(+), 31 deletions(-) diff --git a/src/block_range_scanner.rs b/src/block_range_scanner.rs index 31c7a557..092a045a 100644 --- a/src/block_range_scanner.rs +++ b/src/block_range_scanner.rs @@ -217,7 +217,9 @@ impl BlockRangeScanner { let provider = RootProvider::::new(ClientBuilder::default().ws(WsConnect::new(ws_url)).await?); - let fallback_providers = self.connect_all_fallbacks::().await?; + let fallback_providers = + Self::connect_all_fallbacks::(&self.fallback_ws_urls, &self.fallback_ipc_paths) + .await?; Ok(self.connect_with_fallbacks(provider, fallback_providers)) } @@ -237,7 +239,9 @@ impl BlockRangeScanner { let provider = RootProvider::::new(ClientBuilder::default().ipc(IpcConnect::new(ipc_path)).await?); - let fallback_providers = self.connect_all_fallbacks::().await?; + let fallback_providers = + Self::connect_all_fallbacks::(&self.fallback_ws_urls, &self.fallback_ipc_paths) + .await?; Ok(self.connect_with_fallbacks(provider, fallback_providers)) } @@ -253,37 +257,16 @@ impl BlockRangeScanner { self, provider: RootProvider, ) -> Result, RpcError> { - let fallback_providers = self.connect_all_fallbacks::().await?; + let fallback_providers = + Self::connect_all_fallbacks::(&self.fallback_ws_urls, &self.fallback_ipc_paths) + 
.await?; Ok(self.connect_with_fallbacks(provider, fallback_providers)) } - /// Establishes connections to all configured fallback providers (both WebSocket and IPC). - /// - /// # Errors - /// - /// Returns an error if any fallback connection fails - async fn connect_all_fallbacks( - &self, - ) -> Result>, RpcError> { - let mut fallback_providers = Vec::new(); - - for url in &self.fallback_ws_urls { - let client = ClientBuilder::default().ws(WsConnect::new(url.clone())).await?; - fallback_providers.push(RootProvider::::new(client)); - } - - for path in &self.fallback_ipc_paths { - let client = ClientBuilder::default().ipc(IpcConnect::new(path.clone())).await?; - fallback_providers.push(RootProvider::::new(client)); - } - - Ok(fallback_providers) - } - /// Connects to an existing provider with fallback providers #[must_use] - fn connect_with_fallbacks( + pub fn connect_with_fallbacks( self, provider: RootProvider, fallback_providers: Vec>, @@ -302,6 +285,30 @@ impl BlockRangeScanner { max_block_range: self.max_block_range, } } + + /// Establishes connections to all configured fallback providers (both WebSocket and IPC). + /// + /// # Errors + /// + /// Returns an error if any fallback connection fails + async fn connect_all_fallbacks( + fallback_ws_urls: &Vec, + fallback_ipc_paths: &Vec, + ) -> Result>, RpcError> { + let mut fallback_providers = Vec::new(); + + for url in fallback_ws_urls { + let client = ClientBuilder::default().ws(WsConnect::new(url.clone())).await?; + fallback_providers.push(RootProvider::::new(client)); + } + + for path in fallback_ipc_paths { + let client = ClientBuilder::default().ipc(IpcConnect::new(path.clone())).await?; + fallback_providers.push(RootProvider::::new(client)); + } + + Ok(fallback_providers) + } } pub struct ConnectedBlockRangeScanner { diff --git a/src/event_scanner/scanner/mod.rs b/src/event_scanner/scanner/mod.rs index dfa2b9f7..d86473fd 100644 --- a/src/event_scanner/scanner/mod.rs +++ b/src/event_scanner/scanner/mod.rs @@ -365,9 +365,62 @@ impl EventScannerBuilder { } impl EventScannerBuilder { + /// Adds a fallback WebSocket URL to the scanner. + /// + /// The WebSocket connection will be established when calling the `connect` methods. + /// Multiple fallback providers can be added by calling this method multiple times. + /// + /// # Example + /// + /// ```no_run + /// # use alloy::network::Ethereum; + /// # use event_scanner::EventScannerBuilder; + /// # + /// # async fn example() -> Result<(), Box> { + /// # let ws_url = "ws://localhost:8545".parse()?; + /// # let fallback_url = "ws://fallback:8545".parse()?; + /// let scanner = EventScannerBuilder::historic() + /// .fallback_ws(fallback_url) + /// .connect_ws::(ws_url) + /// .await?; + /// # Ok(()) + /// # } + /// ``` + #[must_use] + pub fn fallback_ws(mut self, url: Url) -> Self { + self.block_range_scanner = self.block_range_scanner.fallback_ws(url); + self + } + + /// Adds a fallback IPC path to the scanner. + /// + /// The IPC connection will be established when calling the `connect` methods. + /// Multiple fallback providers can be added by calling this method multiple times. 
+ /// + /// # Example + /// + /// ```no_run + /// # use alloy::network::Ethereum; + /// # use event_scanner::EventScannerBuilder; + /// # + /// # async fn example() -> Result<(), Box> { + /// # let ws_url = "ws://localhost:8545".parse()?; + /// let scanner = EventScannerBuilder::historic() + /// .fallback_ipc("/tmp/fallback.ipc".to_string()) + /// .connect_ws::(ws_url) + /// .await?; + /// # Ok(()) + /// # } + /// ``` + #[must_use] + pub fn fallback_ipc(mut self, path: String) -> Self { + self.block_range_scanner = self.block_range_scanner.fallback_ipc(path); + self + } + /// Connects to the provider via WebSocket. /// - /// Final builder method: consumes the builder and returns the built [`HistoricEventScanner`]. + /// Final builder method: consumes the builder and returns the built [`EventScanner`]. /// /// # Errors /// @@ -379,7 +432,7 @@ impl EventScannerBuilder { /// Connects to the provider via IPC. /// - /// Final builder method: consumes the builder and returns the built [`HistoricEventScanner`]. + /// Final builder method: consumes the builder and returns the built [`EventScanner`]. /// /// # Errors /// @@ -394,11 +447,11 @@ impl EventScannerBuilder { /// Connects to an existing provider. /// - /// Final builder method: consumes the builder and returns the built [`HistoricEventScanner`]. + /// Final builder method: consumes the builder and returns the built [`EventScanner`]. /// /// # Errors /// - /// Returns an error if the connection fails + /// Returns an error if any fallback connection fails pub async fn connect( self, provider: RootProvider, @@ -406,6 +459,19 @@ impl EventScannerBuilder { let block_range_scanner = self.block_range_scanner.connect::(provider).await?; Ok(EventScanner { config: self.config, block_range_scanner, listeners: Vec::new() }) } + + /// Connects to an existing provider with fallback providers + /// + /// Final builder method: consumes the builder and returns the built [`EventScanner`]. 
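// The two hooks above combined: endpoints are only recorded on the builder,
// and every fallback (WebSocket first, then IPC, per `connect_all_fallbacks`)
// is dialed by the final connect call. URLs and paths are placeholders.
use alloy::network::Ethereum;
use event_scanner::EventScannerBuilder;

async fn scanner_with_fallbacks() -> Result<(), Box<dyn std::error::Error>> {
    let _scanner = EventScannerBuilder::historic()
        .fallback_ws("ws://fallback1:8545".parse()?)
        .fallback_ws("ws://fallback2:8545".parse()?)
        .fallback_ipc("/tmp/fallback.ipc".to_string())
        .connect_ws::<Ethereum>("ws://localhost:8545".parse()?)
        .await?;
    Ok(())
}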
+ pub fn connect_with_fallbacks( + self, + provider: RootProvider, + fallback_providers: Vec>, + ) -> EventScanner { + let block_range_scanner = + self.block_range_scanner.connect_with_fallbacks::(provider, fallback_providers); + EventScanner { config: self.config, block_range_scanner, listeners: Vec::new() } + } } impl EventScanner { @@ -485,4 +551,40 @@ mod tests { Ok(()) } + + #[test] + fn test_scanner_builder_fallback_methods() { + let ws_url: Url = "ws://fallback:8545".parse().unwrap(); + let ipc_path = "/tmp/fallback.ipc".to_string(); + + let builder = EventScannerBuilder::historic() + .fallback_ws(ws_url.clone()) + .fallback_ipc(ipc_path.clone()); + + assert_eq!(builder.block_range_scanner.fallback_ws_urls.len(), 1); + assert_eq!(builder.block_range_scanner.fallback_ws_urls[0], ws_url); + assert_eq!(builder.block_range_scanner.fallback_ipc_paths.len(), 1); + assert_eq!(builder.block_range_scanner.fallback_ipc_paths[0], ipc_path); + } + + #[test] + fn test_scanner_builder_multiple_fallbacks() { + let ws_url1: Url = "ws://fallback1:8545".parse().unwrap(); + let ws_url2: Url = "ws://fallback2:8545".parse().unwrap(); + let ipc_path1 = "/tmp/fallback1.ipc".to_string(); + let ipc_path2 = "/tmp/fallback2.ipc".to_string(); + + let builder = EventScannerBuilder::live() + .fallback_ws(ws_url1.clone()) + .fallback_ws(ws_url2.clone()) + .fallback_ipc(ipc_path1.clone()) + .fallback_ipc(ipc_path2.clone()); + + assert_eq!(builder.block_range_scanner.fallback_ws_urls.len(), 2); + assert_eq!(builder.block_range_scanner.fallback_ws_urls[0], ws_url1); + assert_eq!(builder.block_range_scanner.fallback_ws_urls[1], ws_url2); + assert_eq!(builder.block_range_scanner.fallback_ipc_paths.len(), 2); + assert_eq!(builder.block_range_scanner.fallback_ipc_paths[0], ipc_path1); + assert_eq!(builder.block_range_scanner.fallback_ipc_paths[1], ipc_path2); + } } From 01dfc749c869de8286bbd56b4e7a0e321a907cd8 Mon Sep 17 00:00:00 2001 From: Leo Date: Thu, 30 Oct 2025 19:07:20 +0900 Subject: [PATCH 069/122] fix: doctest --- src/block_range_scanner.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/block_range_scanner.rs b/src/block_range_scanner.rs index 092a045a..ac926d5c 100644 --- a/src/block_range_scanner.rs +++ b/src/block_range_scanner.rs @@ -22,8 +22,8 @@ //! tracing_subscriber::fmt::init(); //! //! // Configuration -//! let block_range_scanner = BlockRangeScanner::::new() -//! .connect_ws(Url::parse("ws://localhost:8546").unwrap()) +//! let block_range_scanner = BlockRangeScanner::new() +//! .connect_ws::(Url::parse("ws://localhost:8546").unwrap()) //! .await?; //! //! // Create client to send subscribe command to block scanner From a1ad2060241ee531807f3085f01a5f83629a7599 Mon Sep 17 00:00:00 2001 From: Leo Date: Thu, 30 Oct 2025 22:16:59 +0900 Subject: [PATCH 070/122] feat: remove all connect_x methods and pass provider in directly up --- src/block_range_scanner.rs | 164 ++----------------------------- src/event_scanner/scanner/mod.rs | 143 +-------------------------- src/lib.rs | 2 +- 3 files changed, 14 insertions(+), 295 deletions(-) diff --git a/src/block_range_scanner.rs b/src/block_range_scanner.rs index ac926d5c..e3faa200 100644 --- a/src/block_range_scanner.rs +++ b/src/block_range_scanner.rs @@ -58,7 +58,7 @@ //! } //! 
``` -use std::{cmp::Ordering, ops::RangeInclusive, time::Duration}; +use std::{cmp::Ordering, ops::RangeInclusive}; use tokio::{ sync::{mpsc, oneshot}, try_join, @@ -68,10 +68,7 @@ use tokio_stream::{StreamExt, wrappers::ReceiverStream}; use crate::{ ScannerMessage, error::ScannerError, - robust_provider::{ - DEFAULT_MAX_RETRIES, DEFAULT_MAX_TIMEOUT, DEFAULT_RETRY_INTERVAL, - Error as RobustProviderError, RobustProvider, - }, + robust_provider::{Error as RobustProviderError, RobustProvider}, types::{ScannerStatus, TryStream}, }; use alloy::{ @@ -79,13 +76,8 @@ use alloy::{ eips::BlockNumberOrTag, network::{BlockResponse, Network, primitives::HeaderResponse}, primitives::{B256, BlockNumber}, - providers::RootProvider, pubsub::Subscription, - rpc::client::ClientBuilder, - transports::{ - RpcError, TransportErrorKind, TransportResult, http::reqwest::Url, ipc::IpcConnect, - ws::WsConnect, - }, + transports::{RpcError, TransportErrorKind}, }; use tracing::{debug, error, info, warn}; @@ -134,11 +126,6 @@ impl From for Message { #[derive(Clone)] pub struct BlockRangeScanner { pub max_block_range: u64, - pub max_timeout: Duration, - pub max_retries: usize, - pub retry_interval: Duration, - pub fallback_ws_urls: Vec, - pub fallback_ipc_paths: Vec, } impl Default for BlockRangeScanner { @@ -150,14 +137,7 @@ impl Default for BlockRangeScanner { impl BlockRangeScanner { #[must_use] pub fn new() -> Self { - Self { - max_block_range: DEFAULT_MAX_BLOCK_RANGE, - max_timeout: DEFAULT_MAX_TIMEOUT, - max_retries: DEFAULT_MAX_RETRIES, - retry_interval: DEFAULT_RETRY_INTERVAL, - fallback_ws_urls: Vec::new(), - fallback_ipc_paths: Vec::new(), - } + Self { max_block_range: DEFAULT_MAX_BLOCK_RANGE } } #[must_use] @@ -166,86 +146,6 @@ impl BlockRangeScanner { self } - #[must_use] - pub fn with_max_timeout(mut self, rpc_timeout: Duration) -> Self { - self.max_timeout = rpc_timeout; - self - } - - #[must_use] - pub fn with_max_retries(mut self, rpc_max_retries: usize) -> Self { - self.max_retries = rpc_max_retries; - self - } - - #[must_use] - pub fn with_retry_interval(mut self, rpc_retry_interval: Duration) -> Self { - self.retry_interval = rpc_retry_interval; - self - } - - /// Adds a fallback WebSocket URL to the block range scanner - /// - /// The WebSocket connection will be established when calling the `connect` methods - #[must_use] - pub fn fallback_ws(mut self, url: Url) -> Self { - self.fallback_ws_urls.push(url); - self - } - - /// Adds a fallback IPC path to the block range scanner - /// - /// The IPC connection will be established when calling the `connect` methods - #[must_use] - pub fn fallback_ipc(mut self, path: String) -> Self { - self.fallback_ipc_paths.push(path); - self - } - - /// Connects to the provider via WebSocket - /// - /// This method establishes the primary WebSocket connection and all configured fallback - /// connections (both WebSocket and IPC). - /// - /// # Errors - /// - /// Returns an error if the primary connection fails - pub async fn connect_ws( - self, - ws_url: Url, - ) -> TransportResult> { - let provider = - RootProvider::::new(ClientBuilder::default().ws(WsConnect::new(ws_url)).await?); - - let fallback_providers = - Self::connect_all_fallbacks::(&self.fallback_ws_urls, &self.fallback_ipc_paths) - .await?; - - Ok(self.connect_with_fallbacks(provider, fallback_providers)) - } - - /// Connects to the provider via IPC - /// - /// This method establishes the primary IPC connection and all configured fallback - /// connections (both WebSocket and IPC). 
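// With the connect_* helpers gone, the caller builds the provider and hands
// it in. This sketch mirrors the body of the removed `connect_ws`; the IPC
// case is analogous with `IpcConnect`.
use alloy::{
    network::Ethereum,
    providers::RootProvider,
    rpc::client::ClientBuilder,
    transports::{TransportResult, http::reqwest::Url, ws::WsConnect},
};

async fn ws_provider(ws_url: Url) -> TransportResult<RootProvider<Ethereum>> {
    let client = ClientBuilder::default().ws(WsConnect::new(ws_url)).await?;
    Ok(RootProvider::<Ethereum>::new(client))
}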
- /// - /// # Errors - /// - /// Returns an error if the primary connection fails - pub async fn connect_ipc( - self, - ipc_path: String, - ) -> Result, RpcError> { - let provider = - RootProvider::::new(ClientBuilder::default().ipc(IpcConnect::new(ipc_path)).await?); - - let fallback_providers = - Self::connect_all_fallbacks::(&self.fallback_ws_urls, &self.fallback_ipc_paths) - .await?; - - Ok(self.connect_with_fallbacks(provider, fallback_providers)) - } - /// Connects to an existing provider /// /// This method also tries to connect any fallback providers (both WebSocket and IPC) @@ -253,62 +153,15 @@ impl BlockRangeScanner { /// # Errors /// /// Returns an error if any fallback connection fails - pub async fn connect( - self, - provider: RootProvider, - ) -> Result, RpcError> { - let fallback_providers = - Self::connect_all_fallbacks::(&self.fallback_ws_urls, &self.fallback_ipc_paths) - .await?; - - Ok(self.connect_with_fallbacks(provider, fallback_providers)) - } - - /// Connects to an existing provider with fallback providers - #[must_use] - pub fn connect_with_fallbacks( + pub fn connect( self, - provider: RootProvider, - fallback_providers: Vec>, + robust_provider: RobustProvider, ) -> ConnectedBlockRangeScanner { - let mut robust_provider = RobustProvider::new(provider) - .max_timeout(self.max_timeout) - .max_retries(self.max_retries) - .retry_interval(self.retry_interval); - - for fallback in fallback_providers { - robust_provider = robust_provider.fallback_provider(fallback); - } - ConnectedBlockRangeScanner { provider: robust_provider, max_block_range: self.max_block_range, } } - - /// Establishes connections to all configured fallback providers (both WebSocket and IPC). - /// - /// # Errors - /// - /// Returns an error if any fallback connection fails - async fn connect_all_fallbacks( - fallback_ws_urls: &Vec, - fallback_ipc_paths: &Vec, - ) -> Result>, RpcError> { - let mut fallback_providers = Vec::new(); - - for url in fallback_ws_urls { - let client = ClientBuilder::default().ws(WsConnect::new(url.clone())).await?; - fallback_providers.push(RootProvider::::new(client)); - } - - for path in fallback_ipc_paths { - let client = ClientBuilder::default().ipc(IpcConnect::new(path.clone())).await?; - fallback_providers.push(RootProvider::::new(client)); - } - - Ok(fallback_providers) - } } pub struct ConnectedBlockRangeScanner { @@ -1024,7 +877,7 @@ mod tests { use alloy::{ eips::BlockId, network::Ethereum, - providers::{ProviderBuilder, ext::AnvilApi}, + providers::{Provider, ProviderBuilder, ext::AnvilApi}, rpc::types::anvil::ReorgOptions, }; use alloy_node_bindings::Anvil; @@ -1050,8 +903,9 @@ mod tests { #[tokio::test] async fn live_mode_processes_all_blocks_respecting_block_confirmations() -> anyhow::Result<()> { let anvil = Anvil::new().try_spawn()?; - let provider = ProviderBuilder::new().connect(anvil.endpoint().as_str()).await?; + let provider = ProviderBuilder::new().connect(anvil.ws_endpoint_url().as_str()).await?; + provider.subscribe_blocks(); // --- Zero block confirmations -> stream immediately --- let client = BlockRangeScanner::new() diff --git a/src/event_scanner/scanner/mod.rs b/src/event_scanner/scanner/mod.rs index d86473fd..3f7764b8 100644 --- a/src/event_scanner/scanner/mod.rs +++ b/src/event_scanner/scanner/mod.rs @@ -14,6 +14,7 @@ use crate::{ MAX_BUFFERED_MESSAGES, }, event_scanner::listener::EventListener, + robust_provider::RobustProvider, }; mod common; @@ -365,111 +366,11 @@ impl EventScannerBuilder { } impl EventScannerBuilder { - /// Adds a 
fallback WebSocket URL to the scanner. - /// - /// The WebSocket connection will be established when calling the `connect` methods. - /// Multiple fallback providers can be added by calling this method multiple times. - /// - /// # Example - /// - /// ```no_run - /// # use alloy::network::Ethereum; - /// # use event_scanner::EventScannerBuilder; - /// # - /// # async fn example() -> Result<(), Box> { - /// # let ws_url = "ws://localhost:8545".parse()?; - /// # let fallback_url = "ws://fallback:8545".parse()?; - /// let scanner = EventScannerBuilder::historic() - /// .fallback_ws(fallback_url) - /// .connect_ws::(ws_url) - /// .await?; - /// # Ok(()) - /// # } - /// ``` - #[must_use] - pub fn fallback_ws(mut self, url: Url) -> Self { - self.block_range_scanner = self.block_range_scanner.fallback_ws(url); - self - } - - /// Adds a fallback IPC path to the scanner. - /// - /// The IPC connection will be established when calling the `connect` methods. - /// Multiple fallback providers can be added by calling this method multiple times. - /// - /// # Example - /// - /// ```no_run - /// # use alloy::network::Ethereum; - /// # use event_scanner::EventScannerBuilder; - /// # - /// # async fn example() -> Result<(), Box> { - /// # let ws_url = "ws://localhost:8545".parse()?; - /// let scanner = EventScannerBuilder::historic() - /// .fallback_ipc("/tmp/fallback.ipc".to_string()) - /// .connect_ws::(ws_url) - /// .await?; - /// # Ok(()) - /// # } - /// ``` - #[must_use] - pub fn fallback_ipc(mut self, path: String) -> Self { - self.block_range_scanner = self.block_range_scanner.fallback_ipc(path); - self - } - - /// Connects to the provider via WebSocket. - /// - /// Final builder method: consumes the builder and returns the built [`EventScanner`]. - /// - /// # Errors - /// - /// Returns an error if the connection fails - pub async fn connect_ws(self, ws_url: Url) -> TransportResult> { - let block_range_scanner = self.block_range_scanner.connect_ws::(ws_url).await?; - Ok(EventScanner { config: self.config, block_range_scanner, listeners: Vec::new() }) - } - - /// Connects to the provider via IPC. - /// - /// Final builder method: consumes the builder and returns the built [`EventScanner`]. - /// - /// # Errors - /// - /// Returns an error if the connection fails - pub async fn connect_ipc( - self, - ipc_path: String, - ) -> TransportResult> { - let block_range_scanner = self.block_range_scanner.connect_ipc::(ipc_path).await?; - Ok(EventScanner { config: self.config, block_range_scanner, listeners: Vec::new() }) - } - /// Connects to an existing provider. /// /// Final builder method: consumes the builder and returns the built [`EventScanner`]. - /// - /// # Errors - /// - /// Returns an error if any fallback connection fails - pub async fn connect( - self, - provider: RootProvider, - ) -> TransportResult> { - let block_range_scanner = self.block_range_scanner.connect::(provider).await?; - Ok(EventScanner { config: self.config, block_range_scanner, listeners: Vec::new() }) - } - - /// Connects to an existing provider with fallback providers - /// - /// Final builder method: consumes the builder and returns the built [`EventScanner`]. 
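// The fallback plumbing removed here does not disappear: it moves behind
// `RobustProvider`, which already owns the retry, timeout, and failover
// policy. A sketch of attaching fallbacks there instead, mirroring the loop
// in the just-removed `connect_with_fallbacks`.
use alloy::{network::Ethereum, providers::RootProvider};
use event_scanner::robust_provider::RobustProvider;

fn with_fallbacks(
    primary: RootProvider<Ethereum>,
    fallbacks: Vec<RootProvider<Ethereum>>,
) -> RobustProvider<Ethereum> {
    let mut robust = RobustProvider::new(primary);
    for fallback in fallbacks {
        robust = robust.fallback_provider(fallback); // accumulated builder-style
    }
    robust
}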
- pub fn connect_with_fallbacks( - self, - provider: RootProvider, - fallback_providers: Vec>, - ) -> EventScanner { - let block_range_scanner = - self.block_range_scanner.connect_with_fallbacks::(provider, fallback_providers); + pub fn connect(self, provider: RobustProvider) -> EventScanner { + let block_range_scanner = self.block_range_scanner.connect::(provider); EventScanner { config: self.config, block_range_scanner, listeners: Vec::new() } } } @@ -525,7 +426,7 @@ mod tests { #[tokio::test] async fn test_historic_event_stream_listeners_vector_updates() -> anyhow::Result<()> { let provider = RootProvider::::new(RpcClient::mocked(Asserter::new())); - let mut scanner = EventScannerBuilder::historic().connect::(provider).await?; + let mut scanner = EventScannerBuilder::historic().connect::(provider); assert!(scanner.listeners.is_empty()); @@ -551,40 +452,4 @@ mod tests { Ok(()) } - - #[test] - fn test_scanner_builder_fallback_methods() { - let ws_url: Url = "ws://fallback:8545".parse().unwrap(); - let ipc_path = "/tmp/fallback.ipc".to_string(); - - let builder = EventScannerBuilder::historic() - .fallback_ws(ws_url.clone()) - .fallback_ipc(ipc_path.clone()); - - assert_eq!(builder.block_range_scanner.fallback_ws_urls.len(), 1); - assert_eq!(builder.block_range_scanner.fallback_ws_urls[0], ws_url); - assert_eq!(builder.block_range_scanner.fallback_ipc_paths.len(), 1); - assert_eq!(builder.block_range_scanner.fallback_ipc_paths[0], ipc_path); - } - - #[test] - fn test_scanner_builder_multiple_fallbacks() { - let ws_url1: Url = "ws://fallback1:8545".parse().unwrap(); - let ws_url2: Url = "ws://fallback2:8545".parse().unwrap(); - let ipc_path1 = "/tmp/fallback1.ipc".to_string(); - let ipc_path2 = "/tmp/fallback2.ipc".to_string(); - - let builder = EventScannerBuilder::live() - .fallback_ws(ws_url1.clone()) - .fallback_ws(ws_url2.clone()) - .fallback_ipc(ipc_path1.clone()) - .fallback_ipc(ipc_path2.clone()); - - assert_eq!(builder.block_range_scanner.fallback_ws_urls.len(), 2); - assert_eq!(builder.block_range_scanner.fallback_ws_urls[0], ws_url1); - assert_eq!(builder.block_range_scanner.fallback_ws_urls[1], ws_url2); - assert_eq!(builder.block_range_scanner.fallback_ipc_paths.len(), 2); - assert_eq!(builder.block_range_scanner.fallback_ipc_paths[0], ipc_path1); - assert_eq!(builder.block_range_scanner.fallback_ipc_paths[1], ipc_path2); - } } diff --git a/src/lib.rs b/src/lib.rs index 1b374057..496051a7 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -1,6 +1,6 @@ pub mod block_range_scanner; -mod robust_provider; +pub mod robust_provider; #[cfg(any(test, feature = "test-utils"))] pub mod test_utils; From c81ddb211d2c91211182adc7f9b18a2a5df1b292 Mon Sep 17 00:00:00 2001 From: Leo Date: Thu, 30 Oct 2025 22:54:08 +0900 Subject: [PATCH 071/122] ref: remove connect_ methods and just connect directly --- examples/historical_scanning/main.rs | 17 ++-- examples/latest_events_scanning/main.rs | 17 ++-- examples/live_scanning/main.rs | 18 +++-- examples/sync_from_block_scanning/main.rs | 21 +++-- examples/sync_from_latest_scanning/main.rs | 21 +++-- src/block_range_scanner.rs | 93 +++++++++------------- src/event_scanner/scanner/mod.rs | 13 +-- src/robust_provider.rs | 6 ++ tests/common.rs | 17 ++-- tests/latest_events/basic.rs | 15 ++-- 10 files changed, 125 insertions(+), 113 deletions(-) diff --git a/examples/historical_scanning/main.rs b/examples/historical_scanning/main.rs index f95476b0..88a5583a 100644 --- a/examples/historical_scanning/main.rs +++ b/examples/historical_scanning/main.rs @@ -1,4 +1,9 
@@ -use alloy::{network::Ethereum, providers::ProviderBuilder, sol, sol_types::SolEvent}; +use alloy::{ + network::Ethereum, + providers::{Provider, ProviderBuilder}, + sol, + sol_types::SolEvent, +}; use alloy_node_bindings::Anvil; use event_scanner::{EventFilter, EventScannerBuilder, Message}; @@ -38,9 +43,11 @@ async fn main() -> anyhow::Result<()> { let anvil = Anvil::new().block_time_f64(0.1).try_spawn()?; let wallet = anvil.wallet(); - let provider = - ProviderBuilder::new().wallet(wallet.unwrap()).connect(anvil.endpoint().as_str()).await?; - let counter_contract = Counter::deploy(provider).await?; + let provider = ProviderBuilder::new() + .wallet(wallet.unwrap()) + .connect(anvil.ws_endpoint_url().as_str()) + .await?; + let counter_contract = Counter::deploy(provider.clone()).await?; let contract_address = counter_contract.address(); @@ -51,7 +58,7 @@ async fn main() -> anyhow::Result<()> { let _ = counter_contract.increase().send().await?.get_receipt().await?; let mut scanner = - EventScannerBuilder::historic().connect_ws::(anvil.ws_endpoint_url()).await?; + EventScannerBuilder::historic().connect::(provider.root().to_owned()); let mut stream = scanner.subscribe(increase_filter); diff --git a/examples/latest_events_scanning/main.rs b/examples/latest_events_scanning/main.rs index dad70168..6bf48a5a 100644 --- a/examples/latest_events_scanning/main.rs +++ b/examples/latest_events_scanning/main.rs @@ -1,4 +1,9 @@ -use alloy::{network::Ethereum, providers::ProviderBuilder, sol, sol_types::SolEvent}; +use alloy::{ + network::Ethereum, + providers::{Provider, ProviderBuilder}, + sol, + sol_types::SolEvent, +}; use alloy_node_bindings::Anvil; use event_scanner::{EventFilter, EventScannerBuilder, Message}; use tokio_stream::StreamExt; @@ -37,9 +42,11 @@ async fn main() -> anyhow::Result<()> { let anvil = Anvil::new().block_time_f64(0.5).try_spawn()?; let wallet = anvil.wallet(); - let provider = - ProviderBuilder::new().wallet(wallet.unwrap()).connect(anvil.endpoint().as_str()).await?; - let counter_contract = Counter::deploy(provider).await?; + let provider = ProviderBuilder::new() + .wallet(wallet.unwrap()) + .connect(anvil.ws_endpoint_url().as_str()) + .await?; + let counter_contract = Counter::deploy(provider.clone()).await?; let contract_address = counter_contract.address(); @@ -48,7 +55,7 @@ async fn main() -> anyhow::Result<()> { .event(Counter::CountIncreased::SIGNATURE); let mut scanner = - EventScannerBuilder::latest(5).connect_ws::(anvil.ws_endpoint_url()).await?; + EventScannerBuilder::latest(5).connect::(provider.root().to_owned()); let mut stream = scanner.subscribe(increase_filter); diff --git a/examples/live_scanning/main.rs b/examples/live_scanning/main.rs index ab081494..2d1ae12d 100644 --- a/examples/live_scanning/main.rs +++ b/examples/live_scanning/main.rs @@ -1,4 +1,9 @@ -use alloy::{network::Ethereum, providers::ProviderBuilder, sol, sol_types::SolEvent}; +use alloy::{ + network::Ethereum, + providers::{Provider, ProviderBuilder}, + sol, + sol_types::SolEvent, +}; use alloy_node_bindings::Anvil; use event_scanner::{EventFilter, EventScannerBuilder, Message}; @@ -38,9 +43,11 @@ async fn main() -> anyhow::Result<()> { let anvil = Anvil::new().block_time(1).try_spawn()?; let wallet = anvil.wallet(); - let provider = - ProviderBuilder::new().wallet(wallet.unwrap()).connect(anvil.endpoint().as_str()).await?; - let counter_contract = Counter::deploy(provider).await?; + let provider = ProviderBuilder::new() + .wallet(wallet.unwrap()) + 
.connect(anvil.ws_endpoint_url().as_str()) + .await?; + let counter_contract = Counter::deploy(provider.clone()).await?; let contract_address = counter_contract.address(); @@ -48,8 +55,7 @@ async fn main() -> anyhow::Result<()> { .contract_address(*contract_address) .event(Counter::CountIncreased::SIGNATURE); - let mut scanner = - EventScannerBuilder::live().connect_ws::(anvil.ws_endpoint_url()).await?; + let mut scanner = EventScannerBuilder::live().connect::(provider.root().to_owned()); let mut stream = scanner.subscribe(increase_filter); diff --git a/examples/sync_from_block_scanning/main.rs b/examples/sync_from_block_scanning/main.rs index 27d18184..8d6a498e 100644 --- a/examples/sync_from_block_scanning/main.rs +++ b/examples/sync_from_block_scanning/main.rs @@ -1,6 +1,11 @@ use std::time::Duration; -use alloy::{network::Ethereum, providers::ProviderBuilder, sol, sol_types::SolEvent}; +use alloy::{ + network::Ethereum, + providers::{Provider, ProviderBuilder}, + sol, + sol_types::SolEvent, +}; use alloy_node_bindings::Anvil; use event_scanner::{EventFilter, EventScannerBuilder, Message}; use tokio::time::sleep; @@ -40,9 +45,11 @@ async fn main() -> anyhow::Result<()> { let anvil = Anvil::new().block_time(1).try_spawn()?; let wallet = anvil.wallet(); - let provider = - ProviderBuilder::new().wallet(wallet.unwrap()).connect(anvil.endpoint().as_str()).await?; - let counter_contract = Counter::deploy(provider).await?; + let provider = ProviderBuilder::new() + .wallet(wallet.unwrap()) + .connect(anvil.ws_endpoint_url().as_str()) + .await?; + let counter_contract = Counter::deploy(provider.clone()).await?; let contract_address = counter_contract.address(); @@ -56,10 +63,8 @@ async fn main() -> anyhow::Result<()> { info!("Historical event {} created", i + 1); } - let mut scanner = EventScannerBuilder::sync() - .from_block(0) - .connect_ws::(anvil.ws_endpoint_url()) - .await?; + let mut scanner = + EventScannerBuilder::sync().from_block(0).connect::(provider.root().to_owned()); let mut stream = scanner.subscribe(increase_filter); diff --git a/examples/sync_from_latest_scanning/main.rs b/examples/sync_from_latest_scanning/main.rs index f0886bd5..ec237a02 100644 --- a/examples/sync_from_latest_scanning/main.rs +++ b/examples/sync_from_latest_scanning/main.rs @@ -1,4 +1,9 @@ -use alloy::{network::Ethereum, providers::ProviderBuilder, sol, sol_types::SolEvent}; +use alloy::{ + network::Ethereum, + providers::{Provider, ProviderBuilder}, + sol, + sol_types::SolEvent, +}; use alloy_node_bindings::Anvil; use event_scanner::{EventFilter, EventScannerBuilder, Message}; @@ -38,9 +43,11 @@ async fn main() -> anyhow::Result<()> { let anvil = Anvil::new().block_time_f64(0.5).try_spawn()?; let wallet = anvil.wallet(); - let provider = - ProviderBuilder::new().wallet(wallet.unwrap()).connect(anvil.endpoint().as_str()).await?; - let counter_contract = Counter::deploy(provider).await?; + let provider = ProviderBuilder::new() + .wallet(wallet.unwrap()) + .connect(anvil.ws_endpoint_url().as_str()) + .await?; + let counter_contract = Counter::deploy(provider.clone()).await?; let contract_address = counter_contract.address(); @@ -48,10 +55,8 @@ async fn main() -> anyhow::Result<()> { .contract_address(*contract_address) .event(Counter::CountIncreased::SIGNATURE); - let mut client = EventScannerBuilder::sync() - .from_latest(5) - .connect_ws::(anvil.ws_endpoint_url()) - .await?; + let mut client = + EventScannerBuilder::sync().from_latest(5).connect::(provider.root().to_owned()); let mut stream = 
client.subscribe(increase_filter); diff --git a/src/block_range_scanner.rs b/src/block_range_scanner.rs index e3faa200..e3511a49 100644 --- a/src/block_range_scanner.rs +++ b/src/block_range_scanner.rs @@ -155,10 +155,10 @@ impl BlockRangeScanner { /// Returns an error if any fallback connection fails pub fn connect( self, - robust_provider: RobustProvider, + provider: impl Into>, ) -> ConnectedBlockRangeScanner { ConnectedBlockRangeScanner { - provider: robust_provider, + provider: provider.into(), max_block_range: self.max_block_range, } } @@ -908,10 +908,8 @@ mod tests { provider.subscribe_blocks(); // --- Zero block confirmations -> stream immediately --- - let client = BlockRangeScanner::new() - .connect_ws::(anvil.ws_endpoint_url()) - .await? - .run()?; + let client = + BlockRangeScanner::new().connect::(provider.root().to_owned()).run()?; let mut stream = client.stream_live(0).await?; @@ -954,15 +952,13 @@ mod tests { async fn stream_from_latest_starts_at_tip_not_confirmed() -> anyhow::Result<()> { let anvil = Anvil::new().try_spawn()?; - let provider = ProviderBuilder::new().connect(anvil.endpoint().as_str()).await?; + let provider = ProviderBuilder::new().connect(anvil.ws_endpoint_url().as_str()).await?; provider.anvil_mine(Some(20), None).await?; let block_confirmations = 5; - let client = BlockRangeScanner::new() - .connect_ws::(anvil.ws_endpoint_url()) - .await? - .run()?; + let client = + BlockRangeScanner::new().connect::(provider.root().to_owned()).run()?; let stream = client.stream_from(BlockNumberOrTag::Latest, block_confirmations).await?; @@ -990,14 +986,12 @@ mod tests { async fn continuous_blocks_if_reorg_less_than_block_confirmation() -> anyhow::Result<()> { let anvil = Anvil::new().try_spawn()?; - let provider = ProviderBuilder::new().connect(anvil.endpoint().as_str()).await?; + let provider = ProviderBuilder::new().connect(anvil.ws_endpoint_url().as_str()).await?; let block_confirmations = 5; - let client = BlockRangeScanner::new() - .connect_ws::(anvil.ws_endpoint_url()) - .await? - .run()?; + let client = + BlockRangeScanner::new().connect::(provider.root().to_owned()).run()?; let mut receiver = client.stream_live(block_confirmations).await?; @@ -1034,14 +1028,12 @@ mod tests { async fn shallow_block_confirmation_does_not_mitigate_reorg() -> anyhow::Result<()> { let anvil = Anvil::new().block_time(1).try_spawn()?; - let provider = ProviderBuilder::new().connect(anvil.endpoint().as_str()).await?; + let provider = ProviderBuilder::new().connect(anvil.ws_endpoint_url().as_str()).await?; let block_confirmations = 3; - let client = BlockRangeScanner::new() - .connect_ws::(anvil.ws_endpoint_url()) - .await? - .run()?; + let client = + BlockRangeScanner::new().connect::(provider.root().to_owned()).run()?; let mut receiver = client.stream_live(block_confirmations).await?; @@ -1107,8 +1099,7 @@ mod tests { let client = BlockRangeScanner::new() .max_block_range(30) - .connect_ws::(anvil.ws_endpoint_url()) - .await? + .connect::(provider.root().to_owned()) .run()?; let mut stream = client @@ -1142,8 +1133,7 @@ mod tests { let client = BlockRangeScanner::new() .max_block_range(30) - .connect_ws::(anvil.ws_endpoint_url()) - .await? 
+ .connect::(provider.root().to_owned()) .run()?; let mut stream = client @@ -1171,14 +1161,13 @@ mod tests { async fn historic_mode_respects_blocks_read_per_epoch() -> anyhow::Result<()> { let anvil = Anvil::new().try_spawn()?; - let provider = ProviderBuilder::new().connect(anvil.endpoint().as_str()).await?; + let provider = ProviderBuilder::new().connect(anvil.ws_endpoint_url().as_str()).await?; provider.anvil_mine(Some(100), None).await?; let client = BlockRangeScanner::new() .max_block_range(5) - .connect_ws::(anvil.ws_endpoint_url()) - .await? + .connect::(provider.root().to_owned()) .run()?; // ranges where each batch is of max blocks per epoch size @@ -1208,8 +1197,7 @@ mod tests { // range where blocks per epoch is larger than the number of blocks on chain let client = BlockRangeScanner::new() .max_block_range(200) - .connect_ws::(anvil.ws_endpoint_url()) - .await? + .connect::(provider.root().to_owned()) .run()?; let mut stream = client.stream_historical(0, 20).await?; @@ -1227,13 +1215,12 @@ mod tests { async fn historic_mode_normalises_start_and_end_block() -> anyhow::Result<()> { let anvil = Anvil::new().try_spawn()?; - let provider = ProviderBuilder::new().connect(anvil.endpoint().as_str()).await?; + let provider = ProviderBuilder::new().connect(anvil.ws_endpoint_url().as_str()).await?; provider.anvil_mine(Some(11), None).await?; let client = BlockRangeScanner::new() .max_block_range(5) - .connect_ws::(anvil.ws_endpoint_url()) - .await? + .connect::(provider.root().to_owned()) .run()?; let mut stream = client.stream_historical(10, 0).await?; @@ -1345,14 +1332,13 @@ mod tests { async fn rewind_single_batch_when_epoch_larger_than_range() -> anyhow::Result<()> { let anvil = Anvil::new().try_spawn()?; - let provider = ProviderBuilder::new().connect(anvil.endpoint().as_str()).await?; + let provider = ProviderBuilder::new().connect(anvil.ws_endpoint_url().as_str()).await?; provider.anvil_mine(Some(150), None).await?; let client = BlockRangeScanner::new() .max_block_range(100) - .connect_ws::(anvil.ws_endpoint_url()) - .await? + .connect::(provider.root().to_owned()) .run()?; let mut stream = client.rewind(100, 150).await?; @@ -1369,14 +1355,13 @@ mod tests { { let anvil = Anvil::new().try_spawn()?; - let provider = ProviderBuilder::new().connect(anvil.endpoint().as_str()).await?; + let provider = ProviderBuilder::new().connect(anvil.ws_endpoint_url().as_str()).await?; provider.anvil_mine(Some(15), None).await?; let client = BlockRangeScanner::new() .max_block_range(5) - .connect_ws::(anvil.ws_endpoint_url()) - .await? + .connect::(provider.root().to_owned()) .run()?; let mut stream = client.rewind(0, 14).await?; @@ -1394,14 +1379,13 @@ mod tests { async fn rewind_with_remainder_trims_first_batch_to_stream_start() -> anyhow::Result<()> { let anvil = Anvil::new().try_spawn()?; - let provider = ProviderBuilder::new().connect(anvil.endpoint().as_str()).await?; + let provider = ProviderBuilder::new().connect(anvil.ws_endpoint_url().as_str()).await?; provider.anvil_mine(Some(15), None).await?; let client = BlockRangeScanner::new() .max_block_range(4) - .connect_ws::(anvil.ws_endpoint_url()) - .await? 
+ .connect::(provider.root().to_owned()) .run()?; let mut stream = client.rewind(3, 12).await?; @@ -1419,14 +1403,13 @@ mod tests { async fn rewind_single_block_range() -> anyhow::Result<()> { let anvil = Anvil::new().try_spawn()?; - let provider = ProviderBuilder::new().connect(anvil.endpoint().as_str()).await?; + let provider = ProviderBuilder::new().connect(anvil.ws_endpoint_url().as_str()).await?; provider.anvil_mine(Some(15), None).await?; let client = BlockRangeScanner::new() .max_block_range(5) - .connect_ws::(anvil.ws_endpoint_url()) - .await? + .connect::(provider.root().to_owned()) .run()?; let mut stream = client.rewind(7, 7).await?; @@ -1441,14 +1424,13 @@ mod tests { async fn rewind_epoch_of_one_sends_each_block_in_reverse_order() -> anyhow::Result<()> { let anvil = Anvil::new().try_spawn()?; - let provider = ProviderBuilder::new().connect(anvil.endpoint().as_str()).await?; + let provider = ProviderBuilder::new().connect(anvil.ws_endpoint_url().as_str()).await?; provider.anvil_mine(Some(15), None).await?; let client = BlockRangeScanner::new() .max_block_range(1) - .connect_ws::(anvil.ws_endpoint_url()) - .await? + .connect::(provider.root().to_owned()) .run()?; let mut stream = client.rewind(5, 8).await?; @@ -1467,14 +1449,13 @@ mod tests { async fn command_rewind_defaults_latest_to_earliest_batches_correctly() -> anyhow::Result<()> { let anvil = Anvil::new().try_spawn()?; - let provider = ProviderBuilder::new().connect(anvil.endpoint().as_str()).await?; + let provider = ProviderBuilder::new().connect(anvil.ws_endpoint_url().as_str()).await?; // Mine 20 blocks, so the total number of blocks is 21 (including 0th block) provider.anvil_mine(Some(20), None).await?; let client = BlockRangeScanner::new() .max_block_range(7) - .connect_ws::(anvil.ws_endpoint_url()) - .await? + .connect::(provider.root().to_owned()) .run()?; let mut stream = @@ -1492,14 +1473,13 @@ mod tests { async fn command_rewind_handles_start_and_end_in_any_order() -> anyhow::Result<()> { let anvil = Anvil::new().try_spawn()?; - let provider = ProviderBuilder::new().connect(anvil.endpoint().as_str()).await?; + let provider = ProviderBuilder::new().connect(anvil.ws_endpoint_url().as_str()).await?; // Ensure blocks at 3 and 15 exist provider.anvil_mine(Some(16), None).await?; let client = BlockRangeScanner::new() .max_block_range(5) - .connect_ws::(anvil.ws_endpoint_url()) - .await? + .connect::(provider.root().to_owned()) .run()?; let mut stream = client.rewind(15, 3).await?; @@ -1523,11 +1503,12 @@ mod tests { async fn command_rewind_propagates_block_not_found_error() -> anyhow::Result<()> { let anvil = Anvil::new().try_spawn()?; + let provider = ProviderBuilder::new().connect(anvil.ws_endpoint_url().as_str()).await?; + // Do not mine up to 999 so start won't exist let client = BlockRangeScanner::new() .max_block_range(5) - .connect_ws::(anvil.ws_endpoint_url()) - .await? + .connect::(provider.root().to_owned()) .run()?; let stream = client.rewind(0, 999).await; diff --git a/src/event_scanner/scanner/mod.rs b/src/event_scanner/scanner/mod.rs index 3f7764b8..8a52f514 100644 --- a/src/event_scanner/scanner/mod.rs +++ b/src/event_scanner/scanner/mod.rs @@ -1,8 +1,6 @@ use alloy::{ eips::BlockNumberOrTag, network::{Ethereum, Network}, - providers::RootProvider, - transports::{TransportResult, http::reqwest::Url}, }; use tokio::sync::mpsc; use tokio_stream::wrappers::ReceiverStream; @@ -369,8 +367,8 @@ impl EventScannerBuilder { /// Connects to an existing provider. 
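// Both call shapes the `impl Into<RobustProvider<N>>` bound below is meant to
// accept, via the `From<RootProvider<N>>` impl added in this patch. Sketch;
// `provider` is assumed to be already connected.
use alloy::{network::Ethereum, providers::RootProvider};
use event_scanner::{EventScannerBuilder, robust_provider::RobustProvider};

fn connect_either_way(provider: RootProvider<Ethereum>) {
    // A bare RootProvider converts implicitly through the new From impl...
    let _scanner = EventScannerBuilder::live().connect::<Ethereum>(provider.clone());
    // ...while a pre-tuned RobustProvider passes through unchanged.
    let robust = RobustProvider::new(provider);
    let _scanner = EventScannerBuilder::live().connect::<Ethereum>(robust);
}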
/// /// Final builder method: consumes the builder and returns the built [`EventScanner`]. - pub fn connect(self, provider: RobustProvider) -> EventScanner { - let block_range_scanner = self.block_range_scanner.connect::(provider); + pub fn connect(self, provider: impl Into>) -> EventScanner { + let block_range_scanner = self.block_range_scanner.connect::(provider.into()); EventScanner { config: self.config, block_range_scanner, listeners: Vec::new() } } } @@ -386,7 +384,10 @@ impl EventScanner { #[cfg(test)] mod tests { - use alloy::{providers::mock::Asserter, rpc::client::RpcClient}; + use alloy::{ + providers::{RootProvider, mock::Asserter}, + rpc::client::RpcClient, + }; use super::*; @@ -443,7 +444,7 @@ mod tests { #[tokio::test] async fn test_historic_event_stream_channel_capacity() -> anyhow::Result<()> { let provider = RootProvider::::new(RpcClient::mocked(Asserter::new())); - let mut scanner = EventScannerBuilder::historic().connect::(provider).await?; + let mut scanner = EventScannerBuilder::historic().connect::(provider); let _ = scanner.subscribe(EventFilter::new()); diff --git a/src/robust_provider.rs b/src/robust_provider.rs index 487fede3..c4cf671e 100644 --- a/src/robust_provider.rs +++ b/src/robust_provider.rs @@ -50,6 +50,12 @@ pub const DEFAULT_MAX_RETRIES: usize = 5; /// Default base delay between retries. pub const DEFAULT_RETRY_INTERVAL: Duration = Duration::from_secs(1); +impl From> for RobustProvider { + fn from(provider: RootProvider) -> Self { + Self::new(provider) + } +} + impl RobustProvider { /// Create a new `RobustProvider` with default settings. #[must_use] diff --git a/tests/common.rs b/tests/common.rs index a4051aed..7b166483 100644 --- a/tests/common.rs +++ b/tests/common.rs @@ -97,8 +97,7 @@ pub async fn setup_live_scanner( let mut scanner = EventScannerBuilder::live() .block_confirmations(confirmations) - .connect_ws(anvil.ws_endpoint_url()) - .await?; + .connect::(provider.clone()); let stream = scanner.subscribe(filter); @@ -116,8 +115,7 @@ pub async fn setup_sync_scanner( let mut scanner = EventScannerBuilder::sync() .from_block(from) .block_confirmations(confirmations) - .connect_ws(anvil.ws_endpoint_url()) - .await?; + .connect::(provider.clone()); let stream = scanner.subscribe(filter); @@ -135,8 +133,7 @@ pub async fn setup_sync_from_latest_scanner( let mut scanner = EventScannerBuilder::sync() .from_latest(latest) .block_confirmations(confirmations) - .connect_ws(anvil.ws_endpoint_url()) - .await?; + .connect::(provider.clone()); let stream = scanner.subscribe(filter); @@ -154,8 +151,7 @@ pub async fn setup_historic_scanner( let mut scanner = EventScannerBuilder::historic() .from_block(from) .to_block(to) - .connect_ws(anvil.ws_endpoint_url()) - .await?; + .connect::(provider.clone()); let stream = scanner.subscribe(filter); @@ -178,7 +174,7 @@ pub async fn setup_latest_scanner( builder = builder.to_block(t); } - let mut scanner = builder.connect_ws(anvil.ws_endpoint_url()).await?; + let mut scanner = builder.connect::(provider.clone()); let stream = scanner.subscribe(filter); @@ -261,7 +257,8 @@ pub fn spawn_anvil(block_time: Option) -> anyhow::Result { pub async fn build_provider(anvil: &AnvilInstance) -> anyhow::Result { let wallet = anvil.wallet().expect("anvil should return a default wallet"); - let provider = ProviderBuilder::new().wallet(wallet).connect(anvil.endpoint().as_str()).await?; + let provider = + ProviderBuilder::new().wallet(wallet).connect(anvil.ws_endpoint_url().as_str()).await?; Ok(provider.root().to_owned()) } diff --git 
a/tests/latest_events/basic.rs b/tests/latest_events/basic.rs index 05235c00..e066e898 100644 --- a/tests/latest_events/basic.rs +++ b/tests/latest_events/basic.rs @@ -85,7 +85,7 @@ async fn latest_scanner_no_events_returns_empty() -> anyhow::Result<()> { #[tokio::test] async fn latest_scanner_respects_range_subset() -> anyhow::Result<()> { - let (anvil, provider, contract, default_filter) = setup_common(None, None).await?; + let (_, provider, contract, default_filter) = setup_common(None, None).await?; // Mine 6 events, one per tx (auto-mined), then manually mint 2 empty blocks to widen range _ = contract.increase_and_get_meta().await?; _ = contract.increase_and_get_meta().await?; @@ -107,8 +107,7 @@ async fn latest_scanner_respects_range_subset() -> anyhow::Result<()> { let mut scanner_with_range = EventScannerBuilder::latest(10) .from_block(start) .to_block(end) - .connect_ws::(anvil.ws_endpoint_url()) - .await?; + .connect::(provider); let mut stream_with_range = scanner_with_range.subscribe(default_filter); scanner_with_range.start().await?; @@ -284,7 +283,7 @@ async fn latest_scanner_cross_contract_filtering() -> anyhow::Result<()> { #[tokio::test] async fn latest_scanner_large_gaps_and_empty_ranges() -> anyhow::Result<()> { // Manual setup to mine empty blocks - let (anvil, provider, contract, default_filter) = setup_common(None, None).await?; + let (_, provider, contract, default_filter) = setup_common(None, None).await?; // Emit 2 events let mut log_meta = vec![]; @@ -303,8 +302,7 @@ async fn latest_scanner_large_gaps_and_empty_ranges() -> anyhow::Result<()> { let mut scanner_with_range = EventScannerBuilder::latest(5) .from_block(start) .to_block(end) - .connect_ws::(anvil.ws_endpoint_url()) - .await?; + .connect::(provider.root().to_owned()); let mut stream_with_range = scanner_with_range.subscribe(default_filter); scanner_with_range.start().await?; @@ -317,7 +315,7 @@ async fn latest_scanner_large_gaps_and_empty_ranges() -> anyhow::Result<()> { #[tokio::test] async fn latest_scanner_boundary_range_single_block() -> anyhow::Result<()> { - let (anvil, provider, contract, default_filter) = setup_common(None, None).await?; + let (_, provider, contract, default_filter) = setup_common(None, None).await?; _ = contract.increase_and_get_meta().await?; let expected = &[contract.increase_and_get_meta().await?]; @@ -336,8 +334,7 @@ async fn latest_scanner_boundary_range_single_block() -> anyhow::Result<()> { let mut scanner_with_range = EventScannerBuilder::latest(5) .from_block(start) .to_block(end) - .connect_ws::(anvil.ws_endpoint_url()) - .await?; + .connect::(provider.root().to_owned()); let mut stream_with_range = scanner_with_range.subscribe(default_filter); scanner_with_range.start().await?; From 8030514207b37757e37e3a9d5dabd84020e0966d Mon Sep 17 00:00:00 2001 From: Leo Date: Thu, 30 Oct 2025 23:23:08 +0900 Subject: [PATCH 072/122] feat: add expect pubsub as rrobust provider doesnt distinguish provider types --- src/robust_provider.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/robust_provider.rs b/src/robust_provider.rs index c4cf671e..bddbbd62 100644 --- a/src/robust_provider.rs +++ b/src/robust_provider.rs @@ -188,6 +188,8 @@ impl RobustProvider { /// after exhausting retries or if the call times out. 
pub async fn subscribe_blocks(&self) -> Result, Error> { info!("eth_subscribe called"); + // We need this otherwise error is not clear + self.provider.client().expect_pubsub_frontend(); let result = self .retry_with_total_timeout( move |provider| async move { provider.subscribe_blocks().await }, From dfa28409af714f3dcf454043386e92b1d80896bb Mon Sep 17 00:00:00 2001 From: Leo Date: Thu, 30 Oct 2025 23:23:16 +0900 Subject: [PATCH 073/122] fix: let anvil live long enough --- tests/latest_events/basic.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/latest_events/basic.rs b/tests/latest_events/basic.rs index e066e898..3d4a564a 100644 --- a/tests/latest_events/basic.rs +++ b/tests/latest_events/basic.rs @@ -85,7 +85,7 @@ async fn latest_scanner_no_events_returns_empty() -> anyhow::Result<()> { #[tokio::test] async fn latest_scanner_respects_range_subset() -> anyhow::Result<()> { - let (_, provider, contract, default_filter) = setup_common(None, None).await?; + let (_anvil, provider, contract, default_filter) = setup_common(None, None).await?; // Mine 6 events, one per tx (auto-mined), then manually mint 2 empty blocks to widen range _ = contract.increase_and_get_meta().await?; _ = contract.increase_and_get_meta().await?; @@ -283,7 +283,7 @@ async fn latest_scanner_cross_contract_filtering() -> anyhow::Result<()> { #[tokio::test] async fn latest_scanner_large_gaps_and_empty_ranges() -> anyhow::Result<()> { // Manual setup to mine empty blocks - let (_, provider, contract, default_filter) = setup_common(None, None).await?; + let (_anvil, provider, contract, default_filter) = setup_common(None, None).await?; // Emit 2 events let mut log_meta = vec![]; @@ -315,7 +315,7 @@ async fn latest_scanner_large_gaps_and_empty_ranges() -> anyhow::Result<()> { #[tokio::test] async fn latest_scanner_boundary_range_single_block() -> anyhow::Result<()> { - let (_, provider, contract, default_filter) = setup_common(None, None).await?; + let (_anvil, provider, contract, default_filter) = setup_common(None, None).await?; _ = contract.increase_and_get_meta().await?; let expected = &[contract.increase_and_get_meta().await?]; From 9bf2b7e118ae51ee90bac9640792508685b4d5aa Mon Sep 17 00:00:00 2001 From: Leo Date: Thu, 30 Oct 2025 23:32:04 +0900 Subject: [PATCH 074/122] ref: reorg provider logic --- tests/{common.rs => common/mod.rs} | 4 +++- tests/historic_to_live/reorg.rs | 2 +- tests/live_mode/reorg.rs | 20 ++++++++++---------- 3 files changed, 14 insertions(+), 12 deletions(-) rename tests/{common.rs => common/mod.rs} (98%) diff --git a/tests/common.rs b/tests/common/mod.rs similarity index 98% rename from tests/common.rs rename to tests/common/mod.rs index 7b166483..5fcec629 100644 --- a/tests/common.rs +++ b/tests/common/mod.rs @@ -182,7 +182,7 @@ pub async fn setup_latest_scanner( } pub async fn reorg_with_new_count_incr_txs
<P>
( - provider: RootProvider, + anvil: AnvilInstance, contract: TestCounter::TestCounterInstance<Arc<P>>, num_initial_events: u64, num_new_events: u64, @@ -192,6 +192,8 @@ pub async fn reorg_with_new_count_incr_txs
<P>
( where P: Provider + Clone, { + let wallet = anvil.wallet().expect("anvil should return a default wallet"); + let provider = ProviderBuilder::new().wallet(wallet).connect(anvil.endpoint().as_str()).await?; let mut event_tx_hashes = vec![]; for _ in 0..num_initial_events { diff --git a/tests/historic_to_live/reorg.rs b/tests/historic_to_live/reorg.rs index 1cb322e1..19380a64 100644 --- a/tests/historic_to_live/reorg.rs +++ b/tests/historic_to_live/reorg.rs @@ -32,7 +32,7 @@ async fn block_confirmations_mitigate_reorgs_historic_to_live() -> anyhow::Resul let same_block = false; let all_tx_hashes = reorg_with_new_count_incr_txs( - provider.clone(), + setup.anvil, contract.clone(), num_initial_events, num_new_events, diff --git a/tests/live_mode/reorg.rs b/tests/live_mode/reorg.rs index 80676272..4372abf5 100644 --- a/tests/live_mode/reorg.rs +++ b/tests/live_mode/reorg.rs @@ -10,7 +10,7 @@ use event_scanner::{Message, ScannerStatus}; #[tokio::test] async fn reorg_rescans_events_within_same_block() -> anyhow::Result<()> { - let LiveScannerSetup { provider, contract, scanner, mut stream, anvil: _anvil } = + let LiveScannerSetup { provider: _provider, contract, scanner, mut stream, anvil } = setup_live_scanner(Option::Some(0.1), Option::None, 0).await?; scanner.start().await?; @@ -21,7 +21,7 @@ async fn reorg_rescans_events_within_same_block() -> anyhow::Result<()> { let same_block = true; let expected_event_tx_hashes = reorg_with_new_count_incr_txs( - provider, + anvil, contract, num_initial_events, num_new_events, @@ -71,7 +71,7 @@ async fn reorg_rescans_events_within_same_block() -> anyhow::Result<()> { #[tokio::test] async fn reorg_rescans_events_with_ascending_blocks() -> anyhow::Result<()> { - let LiveScannerSetup { provider, contract, scanner, mut stream, anvil: _anvil } = + let LiveScannerSetup { provider: _provider, contract, scanner, mut stream, anvil } = setup_live_scanner(Option::Some(0.1), Option::None, 0).await?; scanner.start().await?; @@ -84,7 +84,7 @@ async fn reorg_rescans_events_with_ascending_blocks() -> anyhow::Result<()> { let same_block = false; let expected_event_tx_hashes = reorg_with_new_count_incr_txs( - provider, + anvil, contract, num_initial_events, num_new_events, @@ -134,7 +134,7 @@ async fn reorg_rescans_events_with_ascending_blocks() -> anyhow::Result<()> { #[tokio::test] async fn reorg_depth_one() -> anyhow::Result<()> { - let LiveScannerSetup { provider, contract, scanner, mut stream, anvil: _anvil } = + let LiveScannerSetup { provider: _provider, contract, scanner, mut stream, anvil } = setup_live_scanner(Option::Some(0.1), Option::None, 0).await?; scanner.start().await?; @@ -146,7 +146,7 @@ async fn reorg_depth_one() -> anyhow::Result<()> { let same_block = true; let expected_event_tx_hashes = reorg_with_new_count_incr_txs( - provider, + anvil, contract, num_initial_events, num_new_events, @@ -196,7 +196,7 @@ async fn reorg_depth_one() -> anyhow::Result<()> { #[tokio::test] async fn reorg_depth_two() -> anyhow::Result<()> { - let LiveScannerSetup { provider, contract, scanner, mut stream, anvil: _anvil } = + let LiveScannerSetup { provider: _provider, contract, scanner, mut stream, anvil } = setup_live_scanner(Option::Some(0.1), Option::None, 0).await?; scanner.start().await?; @@ -208,7 +208,7 @@ async fn reorg_depth_two() -> anyhow::Result<()> { let same_block = true; let expected_event_tx_hashes = reorg_with_new_count_incr_txs( - provider, + anvil, contract, num_initial_events, num_new_events, @@ -260,7 +260,7 @@ async fn reorg_depth_two() -> 
anyhow::Result<()> { async fn block_confirmations_mitigate_reorgs() -> anyhow::Result<()> { // any reorg ≤ 5 should be invisible to consumers let block_confirmations = 5; - let LiveScannerSetup { provider, contract, scanner, mut stream, anvil: _anvil } = + let LiveScannerSetup { provider, contract, scanner, mut stream, anvil } = setup_live_scanner(Option::Some(1.0), Option::None, block_confirmations).await?; scanner.start().await?; @@ -274,7 +274,7 @@ async fn block_confirmations_mitigate_reorgs() -> anyhow::Result<()> { let same_block = true; let all_tx_hashes = reorg_with_new_count_incr_txs( - provider.clone(), + anvil, contract, num_initial_events, num_new_events, From ee8512f86b9136fe8adf0af470fd1f61e62215ac Mon Sep 17 00:00:00 2001 From: Leo Date: Thu, 30 Oct 2025 23:34:57 +0900 Subject: [PATCH 075/122] ref: remove comment about test module --- tests/mod.rs | 3 --- 1 file changed, 3 deletions(-) diff --git a/tests/mod.rs b/tests/mod.rs index dea8b657..4399084e 100644 --- a/tests/mod.rs +++ b/tests/mod.rs @@ -1,6 +1,3 @@ -// This test module triggers the `test-utils` feature when running -// `cargo test`. Without it, you'd need to manually specify `--features test-utils` -// every time. mod common; mod historic_mode; mod historic_to_live; From 1a5541f5067780104b00bfef6282a6edbdd9d1d5 Mon Sep 17 00:00:00 2001 From: Leo Date: Thu, 30 Oct 2025 23:44:02 +0900 Subject: [PATCH 076/122] ref: move assert reorg to top for easy debugging --- tests/live_mode/reorg.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tests/live_mode/reorg.rs b/tests/live_mode/reorg.rs index 4372abf5..09520b8f 100644 --- a/tests/live_mode/reorg.rs +++ b/tests/live_mode/reorg.rs @@ -62,9 +62,9 @@ async fn reorg_rescans_events_within_same_block() -> anyhow::Result<()> { let _ = timeout(Duration::from_secs(5), event_counting).await; let final_blocks: Vec<_> = event_block_count.lock().await.clone(); + assert!(*reorg_detected.lock().await); assert_eq!(final_blocks.len() as u64, num_initial_events + num_new_events); assert_eq!(final_blocks, expected_event_tx_hashes); - assert!(*reorg_detected.lock().await); Ok(()) } @@ -125,9 +125,9 @@ async fn reorg_rescans_events_with_ascending_blocks() -> anyhow::Result<()> { let _ = timeout(Duration::from_secs(10), event_counting).await; let final_blocks: Vec<_> = event_block_count.lock().await.clone(); + assert!(*reorg_detected.lock().await); assert_eq!(final_blocks.len() as u64, num_initial_events + num_new_events); assert_eq!(final_blocks, expected_event_tx_hashes); - assert!(*reorg_detected.lock().await); Ok(()) } @@ -187,9 +187,9 @@ async fn reorg_depth_one() -> anyhow::Result<()> { _ = timeout(Duration::from_secs(5), event_counting).await; let final_blocks: Vec<_> = event_block_count.lock().await.clone(); + assert!(*reorg_detected.lock().await); assert_eq!(final_blocks.len() as u64, num_initial_events + num_new_events); assert_eq!(final_blocks, expected_event_tx_hashes); - assert!(*reorg_detected.lock().await); Ok(()) } @@ -249,9 +249,9 @@ async fn reorg_depth_two() -> anyhow::Result<()> { _ = timeout(Duration::from_secs(5), event_counting).await; let final_blocks: Vec<_> = event_block_count.lock().await.clone(); + assert!(*reorg_detected.lock().await); assert_eq!(final_blocks.len() as u64, num_initial_events + num_new_events); assert_eq!(final_blocks, expected_event_tx_hashes); - assert!(*reorg_detected.lock().await); Ok(()) } From a717ef4f9a70cc79de3ed4918df37dce19dd926e Mon Sep 17 00:00:00 2001 From: Leo Date: Fri, 31 Oct 2025 00:17:56 +0900 Subject: 
[PATCH 077/122] ref: pass anvil by reference (prevents being dropped too early) --- tests/common/mod.rs | 2 +- tests/historic_to_live/reorg.rs | 2 +- tests/live_mode/reorg.rs | 10 +++++----- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/tests/common/mod.rs b/tests/common/mod.rs index 5fcec629..90de3930 100644 --- a/tests/common/mod.rs +++ b/tests/common/mod.rs @@ -182,7 +182,7 @@ pub async fn setup_latest_scanner( } pub async fn reorg_with_new_count_incr_txs
<P>
( - anvil: AnvilInstance, + anvil: &AnvilInstance, contract: TestCounter::TestCounterInstance>, num_initial_events: u64, num_new_events: u64, diff --git a/tests/historic_to_live/reorg.rs b/tests/historic_to_live/reorg.rs index 19380a64..1ce3f626 100644 --- a/tests/historic_to_live/reorg.rs +++ b/tests/historic_to_live/reorg.rs @@ -32,7 +32,7 @@ async fn block_confirmations_mitigate_reorgs_historic_to_live() -> anyhow::Resul let same_block = false; let all_tx_hashes = reorg_with_new_count_incr_txs( - setup.anvil, + &setup.anvil, contract.clone(), num_initial_events, num_new_events, diff --git a/tests/live_mode/reorg.rs b/tests/live_mode/reorg.rs index 09520b8f..ff762741 100644 --- a/tests/live_mode/reorg.rs +++ b/tests/live_mode/reorg.rs @@ -21,7 +21,7 @@ async fn reorg_rescans_events_within_same_block() -> anyhow::Result<()> { let same_block = true; let expected_event_tx_hashes = reorg_with_new_count_incr_txs( - anvil, + &anvil, contract, num_initial_events, num_new_events, @@ -84,7 +84,7 @@ async fn reorg_rescans_events_with_ascending_blocks() -> anyhow::Result<()> { let same_block = false; let expected_event_tx_hashes = reorg_with_new_count_incr_txs( - anvil, + &anvil, contract, num_initial_events, num_new_events, @@ -146,7 +146,7 @@ async fn reorg_depth_one() -> anyhow::Result<()> { let same_block = true; let expected_event_tx_hashes = reorg_with_new_count_incr_txs( - anvil, + &anvil, contract, num_initial_events, num_new_events, @@ -208,7 +208,7 @@ async fn reorg_depth_two() -> anyhow::Result<()> { let same_block = true; let expected_event_tx_hashes = reorg_with_new_count_incr_txs( - anvil, + &anvil, contract, num_initial_events, num_new_events, @@ -274,7 +274,7 @@ async fn block_confirmations_mitigate_reorgs() -> anyhow::Result<()> { let same_block = true; let all_tx_hashes = reorg_with_new_count_incr_txs( - anvil, + &anvil, contract, num_initial_events, num_new_events, From 3c4ebb1b7f013b44780c0de9ba556ffdeeb8e546 Mon Sep 17 00:00:00 2001 From: Leo Date: Fri, 31 Oct 2025 21:33:04 +0900 Subject: [PATCH 078/122] ref: only connect to robust provider --- README.md | 2 +- examples/historical_scanning/main.rs | 6 +- examples/latest_events_scanning/main.rs | 6 +- examples/live_scanning/main.rs | 5 +- examples/sync_from_block_scanning/main.rs | 5 +- examples/sync_from_latest_scanning/main.rs | 5 +- src/block_range_scanner.rs | 72 +++++++++++----------- src/event_scanner/scanner/mod.rs | 11 ++-- tests/common/mod.rs | 19 +++--- tests/latest_events/basic.rs | 13 ++-- 10 files changed, 81 insertions(+), 63 deletions(-) diff --git a/README.md b/README.md index af9bf34f..2680f478 100644 --- a/README.md +++ b/README.md @@ -113,7 +113,7 @@ Once configured, connect using one of: - `connect_ws::(ws_url)` - `connect_ipc::(path)` -- `connect::(provider)` +- `connect::(robust_provider)` This will connect the `EventScanner` and allow you to create event streams and start scanning in various [modes](#scanning-modes). 
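A minimal sketch of the wiring this patch converts the examples to, modeled on the `live_scanning` example below (the empty `EventFilter` and the `::<Ethereum>` turbofish are assumptions that follow the code in this series):

```rust
use alloy::{
    network::Ethereum,
    providers::{Provider, ProviderBuilder},
};
use event_scanner::{EventFilter, EventScannerBuilder, robust_provider::RobustProvider};

async fn connect_scanner(ws_url: &str) -> anyhow::Result<()> {
    // Build a plain Alloy provider, then wrap its root in RobustProvider
    // so the scanner gets the built-in retry/timeout behaviour.
    let provider = ProviderBuilder::new().connect(ws_url).await?;
    let robust_provider = RobustProvider::new(provider.root().clone());

    // `connect` takes the RobustProvider directly and no longer needs `.await?`.
    let mut scanner = EventScannerBuilder::live().connect::<Ethereum>(robust_provider);
    let _stream = scanner.subscribe(EventFilter::new());
    scanner.start().await?;
    Ok(())
}
```

The design point: `connect` becomes synchronous because the underlying connection was already established when the `RootProvider` was built, so the builder only wraps it.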
diff --git a/examples/historical_scanning/main.rs b/examples/historical_scanning/main.rs index 88a5583a..98807cc0 100644 --- a/examples/historical_scanning/main.rs +++ b/examples/historical_scanning/main.rs @@ -6,7 +6,7 @@ use alloy::{ }; use alloy_node_bindings::Anvil; -use event_scanner::{EventFilter, EventScannerBuilder, Message}; +use event_scanner::{EventFilter, EventScannerBuilder, Message, robust_provider::RobustProvider}; use tokio_stream::StreamExt; use tracing::{error, info}; use tracing_subscriber::EnvFilter; @@ -57,8 +57,8 @@ async fn main() -> anyhow::Result<()> { let _ = counter_contract.increase().send().await?.get_receipt().await?; - let mut scanner = - EventScannerBuilder::historic().connect::(provider.root().to_owned()); + let robust_provider = RobustProvider::new(provider.root().clone()); + let mut scanner = EventScannerBuilder::historic().connect::(robust_provider); let mut stream = scanner.subscribe(increase_filter); diff --git a/examples/latest_events_scanning/main.rs b/examples/latest_events_scanning/main.rs index 6bf48a5a..af1dd927 100644 --- a/examples/latest_events_scanning/main.rs +++ b/examples/latest_events_scanning/main.rs @@ -5,7 +5,7 @@ use alloy::{ sol_types::SolEvent, }; use alloy_node_bindings::Anvil; -use event_scanner::{EventFilter, EventScannerBuilder, Message}; +use event_scanner::{EventFilter, EventScannerBuilder, Message, robust_provider::RobustProvider}; use tokio_stream::StreamExt; use tracing::{error, info}; use tracing_subscriber::EnvFilter; @@ -54,8 +54,8 @@ async fn main() -> anyhow::Result<()> { .contract_address(*contract_address) .event(Counter::CountIncreased::SIGNATURE); - let mut scanner = - EventScannerBuilder::latest(5).connect::(provider.root().to_owned()); + let robust_provider = RobustProvider::new(provider.root().clone()); + let mut scanner = EventScannerBuilder::latest(5).connect::(robust_provider); let mut stream = scanner.subscribe(increase_filter); diff --git a/examples/live_scanning/main.rs b/examples/live_scanning/main.rs index 2d1ae12d..10bf3140 100644 --- a/examples/live_scanning/main.rs +++ b/examples/live_scanning/main.rs @@ -5,7 +5,7 @@ use alloy::{ sol_types::SolEvent, }; use alloy_node_bindings::Anvil; -use event_scanner::{EventFilter, EventScannerBuilder, Message}; +use event_scanner::{EventFilter, EventScannerBuilder, Message, robust_provider::RobustProvider}; use tokio_stream::StreamExt; use tracing::{error, info}; @@ -55,7 +55,8 @@ async fn main() -> anyhow::Result<()> { .contract_address(*contract_address) .event(Counter::CountIncreased::SIGNATURE); - let mut scanner = EventScannerBuilder::live().connect::(provider.root().to_owned()); + let robust_provider = RobustProvider::new(provider.root().clone()); + let mut scanner = EventScannerBuilder::live().connect::(robust_provider); let mut stream = scanner.subscribe(increase_filter); diff --git a/examples/sync_from_block_scanning/main.rs b/examples/sync_from_block_scanning/main.rs index 8d6a498e..bbc957b4 100644 --- a/examples/sync_from_block_scanning/main.rs +++ b/examples/sync_from_block_scanning/main.rs @@ -7,7 +7,7 @@ use alloy::{ sol_types::SolEvent, }; use alloy_node_bindings::Anvil; -use event_scanner::{EventFilter, EventScannerBuilder, Message}; +use event_scanner::{EventFilter, EventScannerBuilder, Message, robust_provider::RobustProvider}; use tokio::time::sleep; use tokio_stream::StreamExt; use tracing::{error, info}; @@ -63,8 +63,9 @@ async fn main() -> anyhow::Result<()> { info!("Historical event {} created", i + 1); } + let robust_provider = 
RobustProvider::new(provider.root().clone()); let mut scanner = - EventScannerBuilder::sync().from_block(0).connect::(provider.root().to_owned()); + EventScannerBuilder::sync().from_block(0).connect::(robust_provider); let mut stream = scanner.subscribe(increase_filter); diff --git a/examples/sync_from_latest_scanning/main.rs b/examples/sync_from_latest_scanning/main.rs index ec237a02..0712b00a 100644 --- a/examples/sync_from_latest_scanning/main.rs +++ b/examples/sync_from_latest_scanning/main.rs @@ -5,7 +5,7 @@ use alloy::{ sol_types::SolEvent, }; use alloy_node_bindings::Anvil; -use event_scanner::{EventFilter, EventScannerBuilder, Message}; +use event_scanner::{EventFilter, EventScannerBuilder, Message, robust_provider::RobustProvider}; use tokio_stream::StreamExt; use tracing::{error, info}; @@ -55,8 +55,9 @@ async fn main() -> anyhow::Result<()> { .contract_address(*contract_address) .event(Counter::CountIncreased::SIGNATURE); + let robust_provider = RobustProvider::new(provider.root().clone()); let mut client = - EventScannerBuilder::sync().from_latest(5).connect::(provider.root().to_owned()); + EventScannerBuilder::sync().from_latest(5).connect::(robust_provider); let mut stream = client.subscribe(increase_filter); diff --git a/src/block_range_scanner.rs b/src/block_range_scanner.rs index e3511a49..e9edbe48 100644 --- a/src/block_range_scanner.rs +++ b/src/block_range_scanner.rs @@ -147,20 +147,9 @@ impl BlockRangeScanner { } /// Connects to an existing provider - /// - /// This method also tries to connect any fallback providers (both WebSocket and IPC) - /// - /// # Errors - /// - /// Returns an error if any fallback connection fails - pub fn connect( - self, - provider: impl Into>, - ) -> ConnectedBlockRangeScanner { - ConnectedBlockRangeScanner { - provider: provider.into(), - max_block_range: self.max_block_range, - } + #[must_use] + pub fn connect(self, provider: RobustProvider) -> ConnectedBlockRangeScanner { + ConnectedBlockRangeScanner { provider, max_block_range: self.max_block_range } } } @@ -905,11 +894,12 @@ mod tests { let anvil = Anvil::new().try_spawn()?; let provider = ProviderBuilder::new().connect(anvil.ws_endpoint_url().as_str()).await?; + let robust_provider = RobustProvider::new(provider.root().clone()); + provider.subscribe_blocks(); // --- Zero block confirmations -> stream immediately --- - let client = - BlockRangeScanner::new().connect::(provider.root().to_owned()).run()?; + let client = BlockRangeScanner::new().connect::(robust_provider).run()?; let mut stream = client.stream_live(0).await?; @@ -953,12 +943,12 @@ mod tests { let anvil = Anvil::new().try_spawn()?; let provider = ProviderBuilder::new().connect(anvil.ws_endpoint_url().as_str()).await?; + let robust_provider = RobustProvider::new(provider.root().clone()); provider.anvil_mine(Some(20), None).await?; let block_confirmations = 5; - let client = - BlockRangeScanner::new().connect::(provider.root().to_owned()).run()?; + let client = BlockRangeScanner::new().connect::(robust_provider).run()?; let stream = client.stream_from(BlockNumberOrTag::Latest, block_confirmations).await?; @@ -987,11 +977,11 @@ mod tests { let anvil = Anvil::new().try_spawn()?; let provider = ProviderBuilder::new().connect(anvil.ws_endpoint_url().as_str()).await?; + let robust_provider = RobustProvider::new(provider.root().clone()); let block_confirmations = 5; - let client = - BlockRangeScanner::new().connect::(provider.root().to_owned()).run()?; + let client = BlockRangeScanner::new().connect::(robust_provider).run()?; let 
mut receiver = client.stream_live(block_confirmations).await?; @@ -1029,11 +1019,11 @@ mod tests { let anvil = Anvil::new().block_time(1).try_spawn()?; let provider = ProviderBuilder::new().connect(anvil.ws_endpoint_url().as_str()).await?; + let robust_provider = RobustProvider::new(provider.root().clone()); let block_confirmations = 3; - let client = - BlockRangeScanner::new().connect::(provider.root().to_owned()).run()?; + let client = BlockRangeScanner::new().connect::(robust_provider).run()?; let mut receiver = client.stream_live(block_confirmations).await?; @@ -1092,6 +1082,7 @@ mod tests { async fn historical_emits_correction_range_when_reorg_below_end() -> anyhow::Result<()> { let anvil = Anvil::new().try_spawn()?; let provider = ProviderBuilder::new().connect(anvil.ws_endpoint_url().as_str()).await?; + let robust_provider = RobustProvider::new(provider.root().clone()); provider.anvil_mine(Some(120), None).await?; @@ -1099,7 +1090,7 @@ mod tests { let client = BlockRangeScanner::new() .max_block_range(30) - .connect::(provider.root().to_owned()) + .connect::(robust_provider) .run()?; let mut stream = client @@ -1126,6 +1117,7 @@ mod tests { async fn historical_emits_correction_range_when_end_num_reorgs() -> anyhow::Result<()> { let anvil = Anvil::new().try_spawn()?; let provider = ProviderBuilder::new().connect(anvil.ws_endpoint_url().as_str()).await?; + let robust_provider = RobustProvider::new(provider.root().clone()); provider.anvil_mine(Some(120), None).await?; @@ -1133,7 +1125,7 @@ mod tests { let client = BlockRangeScanner::new() .max_block_range(30) - .connect::(provider.root().to_owned()) + .connect::(robust_provider) .run()?; let mut stream = client @@ -1162,12 +1154,13 @@ mod tests { let anvil = Anvil::new().try_spawn()?; let provider = ProviderBuilder::new().connect(anvil.ws_endpoint_url().as_str()).await?; + let robust_provider = RobustProvider::new(provider.root().clone()); provider.anvil_mine(Some(100), None).await?; let client = BlockRangeScanner::new() .max_block_range(5) - .connect::(provider.root().to_owned()) + .connect::(robust_provider.clone()) .run()?; // ranges where each batch is of max blocks per epoch size @@ -1197,7 +1190,7 @@ mod tests { // range where blocks per epoch is larger than the number of blocks on chain let client = BlockRangeScanner::new() .max_block_range(200) - .connect::(provider.root().to_owned()) + .connect::(robust_provider) .run()?; let mut stream = client.stream_historical(0, 20).await?; @@ -1216,11 +1209,12 @@ mod tests { let anvil = Anvil::new().try_spawn()?; let provider = ProviderBuilder::new().connect(anvil.ws_endpoint_url().as_str()).await?; + let robust_provider = RobustProvider::new(provider.root().clone()); provider.anvil_mine(Some(11), None).await?; let client = BlockRangeScanner::new() .max_block_range(5) - .connect::(provider.root().to_owned()) + .connect::(robust_provider) .run()?; let mut stream = client.stream_historical(10, 0).await?; @@ -1333,12 +1327,13 @@ mod tests { let anvil = Anvil::new().try_spawn()?; let provider = ProviderBuilder::new().connect(anvil.ws_endpoint_url().as_str()).await?; + let robust_provider = RobustProvider::new(provider.root().clone()); provider.anvil_mine(Some(150), None).await?; let client = BlockRangeScanner::new() .max_block_range(100) - .connect::(provider.root().to_owned()) + .connect::(robust_provider) .run()?; let mut stream = client.rewind(100, 150).await?; @@ -1356,12 +1351,13 @@ mod tests { let anvil = Anvil::new().try_spawn()?; let provider = 
ProviderBuilder::new().connect(anvil.ws_endpoint_url().as_str()).await?; + let robust_provider = RobustProvider::new(provider.root().clone()); provider.anvil_mine(Some(15), None).await?; let client = BlockRangeScanner::new() .max_block_range(5) - .connect::(provider.root().to_owned()) + .connect::(robust_provider) .run()?; let mut stream = client.rewind(0, 14).await?; @@ -1380,12 +1376,13 @@ mod tests { let anvil = Anvil::new().try_spawn()?; let provider = ProviderBuilder::new().connect(anvil.ws_endpoint_url().as_str()).await?; + let robust_provider = RobustProvider::new(provider.root().clone()); provider.anvil_mine(Some(15), None).await?; let client = BlockRangeScanner::new() .max_block_range(4) - .connect::(provider.root().to_owned()) + .connect::(robust_provider) .run()?; let mut stream = client.rewind(3, 12).await?; @@ -1404,12 +1401,13 @@ mod tests { let anvil = Anvil::new().try_spawn()?; let provider = ProviderBuilder::new().connect(anvil.ws_endpoint_url().as_str()).await?; + let robust_provider = RobustProvider::new(provider.root().clone()); provider.anvil_mine(Some(15), None).await?; let client = BlockRangeScanner::new() .max_block_range(5) - .connect::(provider.root().to_owned()) + .connect::(robust_provider) .run()?; let mut stream = client.rewind(7, 7).await?; @@ -1425,12 +1423,13 @@ mod tests { let anvil = Anvil::new().try_spawn()?; let provider = ProviderBuilder::new().connect(anvil.ws_endpoint_url().as_str()).await?; + let robust_provider = RobustProvider::new(provider.root().to_owned()); provider.anvil_mine(Some(15), None).await?; let client = BlockRangeScanner::new() .max_block_range(1) - .connect::(provider.root().to_owned()) + .connect::(robust_provider) .run()?; let mut stream = client.rewind(5, 8).await?; @@ -1450,12 +1449,13 @@ mod tests { let anvil = Anvil::new().try_spawn()?; let provider = ProviderBuilder::new().connect(anvil.ws_endpoint_url().as_str()).await?; + let robust_provider = RobustProvider::new(provider.root().clone()); // Mine 20 blocks, so the total number of blocks is 21 (including 0th block) provider.anvil_mine(Some(20), None).await?; let client = BlockRangeScanner::new() .max_block_range(7) - .connect::(provider.root().to_owned()) + .connect::(robust_provider) .run()?; let mut stream = @@ -1474,12 +1474,13 @@ mod tests { let anvil = Anvil::new().try_spawn()?; let provider = ProviderBuilder::new().connect(anvil.ws_endpoint_url().as_str()).await?; + let robust_provider = RobustProvider::new(provider.root().clone()); // Ensure blocks at 3 and 15 exist provider.anvil_mine(Some(16), None).await?; let client = BlockRangeScanner::new() .max_block_range(5) - .connect::(provider.root().to_owned()) + .connect::(robust_provider) .run()?; let mut stream = client.rewind(15, 3).await?; @@ -1504,11 +1505,12 @@ mod tests { let anvil = Anvil::new().try_spawn()?; let provider = ProviderBuilder::new().connect(anvil.ws_endpoint_url().as_str()).await?; + let robust_provider = RobustProvider::new(provider.root().to_owned()); // Do not mine up to 999 so start won't exist let client = BlockRangeScanner::new() .max_block_range(5) - .connect::(provider.root().to_owned()) + .connect::(robust_provider) .run()?; let stream = client.rewind(0, 999).await; diff --git a/src/event_scanner/scanner/mod.rs b/src/event_scanner/scanner/mod.rs index 8a52f514..237a0910 100644 --- a/src/event_scanner/scanner/mod.rs +++ b/src/event_scanner/scanner/mod.rs @@ -367,8 +367,9 @@ impl EventScannerBuilder { /// Connects to an existing provider. 
/// /// Final builder method: consumes the builder and returns the built [`EventScanner`]. - pub fn connect(self, provider: impl Into>) -> EventScanner { - let block_range_scanner = self.block_range_scanner.connect::(provider.into()); + #[must_use] + pub fn connect(self, provider: RobustProvider) -> EventScanner { + let block_range_scanner = self.block_range_scanner.connect::(provider); EventScanner { config: self.config, block_range_scanner, listeners: Vec::new() } } } @@ -427,7 +428,8 @@ mod tests { #[tokio::test] async fn test_historic_event_stream_listeners_vector_updates() -> anyhow::Result<()> { let provider = RootProvider::::new(RpcClient::mocked(Asserter::new())); - let mut scanner = EventScannerBuilder::historic().connect::(provider); + let robust_provider = RobustProvider::new(provider.clone()); + let mut scanner = EventScannerBuilder::historic().connect::(robust_provider); assert!(scanner.listeners.is_empty()); @@ -444,7 +446,8 @@ mod tests { #[tokio::test] async fn test_historic_event_stream_channel_capacity() -> anyhow::Result<()> { let provider = RootProvider::::new(RpcClient::mocked(Asserter::new())); - let mut scanner = EventScannerBuilder::historic().connect::(provider); + let robust_provider = RobustProvider::new(provider.clone()); + let mut scanner = EventScannerBuilder::historic().connect::(robust_provider); let _ = scanner.subscribe(EventFilter::new()); diff --git a/tests/common/mod.rs b/tests/common/mod.rs index 90de3930..19c4bf40 100644 --- a/tests/common/mod.rs +++ b/tests/common/mod.rs @@ -16,7 +16,7 @@ use alloy::{ use alloy_node_bindings::{Anvil, AnvilInstance}; use event_scanner::{ EventFilter, EventScanner, EventScannerBuilder, Historic, LatestEvents, Live, Message, - SyncFromBlock, SyncFromLatestEvents, test_utils::LogMetadata, + SyncFromBlock, SyncFromLatestEvents, robust_provider::RobustProvider, test_utils::LogMetadata, }; use tokio_stream::wrappers::ReceiverStream; @@ -95,9 +95,11 @@ pub async fn setup_live_scanner( ) -> anyhow::Result + Clone>> { let (anvil, provider, contract, filter) = setup_common(block_interval, filter).await?; + let robust_provider = RobustProvider::new(provider.root().clone()); + let mut scanner = EventScannerBuilder::live() .block_confirmations(confirmations) - .connect::(provider.clone()); + .connect::(robust_provider); let stream = scanner.subscribe(filter); @@ -111,11 +113,12 @@ pub async fn setup_sync_scanner( confirmations: u64, ) -> anyhow::Result + Clone>> { let (anvil, provider, contract, filter) = setup_common(block_interval, filter).await?; + let robust_provider = RobustProvider::new(provider.root().clone()); let mut scanner = EventScannerBuilder::sync() .from_block(from) .block_confirmations(confirmations) - .connect::(provider.clone()); + .connect::(robust_provider); let stream = scanner.subscribe(filter); @@ -129,11 +132,12 @@ pub async fn setup_sync_from_latest_scanner( confirmations: u64, ) -> anyhow::Result + Clone>> { let (anvil, provider, contract, filter) = setup_common(block_interval, filter).await?; + let robust_provider = RobustProvider::new(provider.root().clone()); let mut scanner = EventScannerBuilder::sync() .from_latest(latest) .block_confirmations(confirmations) - .connect::(provider.clone()); + .connect::(robust_provider); let stream = scanner.subscribe(filter); @@ -147,11 +151,11 @@ pub async fn setup_historic_scanner( to: BlockNumberOrTag, ) -> anyhow::Result + Clone>> { let (anvil, provider, contract, filter) = setup_common(block_interval, filter).await?; - + let robust_provider = 
RobustProvider::new(provider.clone()); let mut scanner = EventScannerBuilder::historic() .from_block(from) .to_block(to) - .connect::(provider.clone()); + .connect::(robust_provider); let stream = scanner.subscribe(filter); @@ -174,7 +178,8 @@ pub async fn setup_latest_scanner( builder = builder.to_block(t); } - let mut scanner = builder.connect::(provider.clone()); + let robust_provider = RobustProvider::new(provider.clone()); + let mut scanner = builder.connect::(robust_provider); let stream = scanner.subscribe(filter); diff --git a/tests/latest_events/basic.rs b/tests/latest_events/basic.rs index 3d4a564a..ea0d08fa 100644 --- a/tests/latest_events/basic.rs +++ b/tests/latest_events/basic.rs @@ -11,7 +11,8 @@ use crate::common::{ TestCounter, TestCounterExt, deploy_counter, setup_common, setup_latest_scanner, }; use event_scanner::{ - EventFilter, EventScannerBuilder, assert_closed, assert_next, test_utils::LogMetadata, + EventFilter, EventScannerBuilder, assert_closed, assert_next, robust_provider::RobustProvider, + test_utils::LogMetadata, }; #[tokio::test] @@ -104,10 +105,12 @@ async fn latest_scanner_respects_range_subset() -> anyhow::Result<()> { let start = BlockNumberOrTag::from(head - 3); let end = BlockNumberOrTag::from(head); + let robust_provider = RobustProvider::new(provider.root().clone()); + let mut scanner_with_range = EventScannerBuilder::latest(10) .from_block(start) .to_block(end) - .connect::(provider); + .connect::(robust_provider); let mut stream_with_range = scanner_with_range.subscribe(default_filter); scanner_with_range.start().await?; @@ -299,10 +302,11 @@ async fn latest_scanner_large_gaps_and_empty_ranges() -> anyhow::Result<()> { let start = BlockNumberOrTag::from(head - 12); let end = BlockNumberOrTag::from(head); + let robust_provider = RobustProvider::new(provider.root().clone()); let mut scanner_with_range = EventScannerBuilder::latest(5) .from_block(start) .to_block(end) - .connect::(provider.root().to_owned()); + .connect::(robust_provider); let mut stream_with_range = scanner_with_range.subscribe(default_filter); scanner_with_range.start().await?; @@ -331,10 +335,11 @@ async fn latest_scanner_boundary_range_single_block() -> anyhow::Result<()> { .unwrap(); let end = start; + let robust_provider = RobustProvider::new(provider.root().clone()); let mut scanner_with_range = EventScannerBuilder::latest(5) .from_block(start) .to_block(end) - .connect::(provider.root().to_owned()); + .connect::(robust_provider); let mut stream_with_range = scanner_with_range.subscribe(default_filter); scanner_with_range.start().await?; From c89da66798999b9d25e8732ce985d2b8e78f453d Mon Sep 17 00:00:00 2001 From: Nenad Date: Mon, 3 Nov 2025 10:16:04 +0100 Subject: [PATCH 079/122] test: fix reorg_rescans_events_within_same_block (#147) --- src/robust_provider.rs | 10 ++-- tests/live_mode/reorg.rs | 103 ++++++++++++++++++--------------------- 2 files changed, 55 insertions(+), 58 deletions(-) diff --git a/src/robust_provider.rs b/src/robust_provider.rs index bddbbd62..2f2cc6ad 100644 --- a/src/robust_provider.rs +++ b/src/robust_provider.rs @@ -3,10 +3,10 @@ use std::{future::Future, sync::Arc, time::Duration}; use alloy::{ eips::{BlockId, BlockNumberOrTag}, network::Network, - providers::{Provider, RootProvider}, + providers::{Provider, RootProvider, ext::AnvilApi}, pubsub::Subscription, - rpc::types::{Filter, Log}, - transports::{RpcError, TransportErrorKind}, + rpc::types::{Filter, Log, anvil::ReorgOptions}, + transports::{RpcError, TransportErrorKind, TransportResult}, }; use 
backon::{ExponentialBuilder, Retryable}; use thiserror::Error; @@ -138,6 +138,10 @@ impl RobustProvider { result } + pub async fn anvil_reorg(&self, options: ReorgOptions) -> TransportResult<()> { + self.provider.anvil_reorg(options).await + } + /// Fetch a block by hash with retry and timeout. /// /// # Errors diff --git a/tests/live_mode/reorg.rs b/tests/live_mode/reorg.rs index ff762741..17ebf360 100644 --- a/tests/live_mode/reorg.rs +++ b/tests/live_mode/reorg.rs @@ -4,67 +4,60 @@ use tokio_stream::StreamExt; use tokio::{sync::Mutex, time::timeout}; -use crate::common::{LiveScannerSetup, reorg_with_new_count_incr_txs, setup_live_scanner}; -use alloy::providers::ext::AnvilApi; -use event_scanner::{Message, ScannerStatus}; +use crate::common::{ + LiveScannerSetup, TestCounter::CountIncreased, reorg_with_new_count_incr_txs, setup_common, + setup_live_scanner, +}; +use alloy::{ + primitives::U256, + providers::{Provider, ext::AnvilApi}, + rpc::types::anvil::{ReorgOptions, TransactionData}, +}; +use event_scanner::{ + EventScannerBuilder, Message, ScannerStatus, assert_empty, assert_next, + robust_provider::RobustProvider, +}; #[tokio::test] async fn reorg_rescans_events_within_same_block() -> anyhow::Result<()> { - let LiveScannerSetup { provider: _provider, contract, scanner, mut stream, anvil } = - setup_live_scanner(Option::Some(0.1), Option::None, 0).await?; + let (_anvil, provider, contract, filter) = setup_common(None, None).await?; + let provider = RobustProvider::new(provider.root().clone()); + let mut scanner = EventScannerBuilder::live().block_confirmations(0).connect(provider.clone()); + let mut stream = scanner.subscribe(filter); scanner.start().await?; - let num_initial_events = 5; - let num_new_events = 3; - let reorg_depth = 5; - let same_block = true; - - let expected_event_tx_hashes = reorg_with_new_count_incr_txs( - &anvil, - contract, - num_initial_events, - num_new_events, - reorg_depth, - same_block, - ) - .await?; - - let event_block_count = Arc::new(Mutex::new(Vec::new())); - let event_block_count_clone = Arc::clone(&event_block_count); - - let reorg_detected = Arc::new(Mutex::new(false)); - let reorg_detected_clone = reorg_detected.clone(); - - let event_counting = async move { - while let Some(message) = stream.next().await { - match message { - Message::Data(logs) => { - let mut guard = event_block_count_clone.lock().await; - for log in logs { - if let Some(n) = log.transaction_hash { - guard.push(n); - } - } - } - Message::Error(e) => { - panic!("panic with error {e}"); - } - Message::Status(status) => { - if matches!(status, ScannerStatus::ReorgDetected) { - *reorg_detected_clone.lock().await = true; - } - } - } - } - }; - - let _ = timeout(Duration::from_secs(5), event_counting).await; - - let final_blocks: Vec<_> = event_block_count.lock().await.clone(); - assert!(*reorg_detected.lock().await); - assert_eq!(final_blocks.len() as u64, num_initial_events + num_new_events); - assert_eq!(final_blocks, expected_event_tx_hashes); + // emit initial events + for _ in 0..5 { + contract.increase().send().await?.watch().await?; + } + + // assert initial events are emitted as expected + assert_next!(stream, &[CountIncreased { newCount: U256::from(1) }]); + assert_next!(stream, &[CountIncreased { newCount: U256::from(2) }]); + assert_next!(stream, &[CountIncreased { newCount: U256::from(3) }]); + assert_next!(stream, &[CountIncreased { newCount: U256::from(4) }]); + assert_next!(stream, &[CountIncreased { newCount: U256::from(5) }]); + let mut stream = 
assert_empty!(stream); + + // reorg the chain + let tx_block_pairs = (0..3) + .map(|_| (TransactionData::JSON(contract.increase().into_transaction_request()), 0)) + .collect(); + + provider.anvil_reorg(ReorgOptions { depth: 4, tx_block_pairs }).await?; + + // assert expected messages post-reorg + assert_next!(stream, ScannerStatus::ReorgDetected); + assert_next!( + stream, + &[ + CountIncreased { newCount: U256::from(2) }, + CountIncreased { newCount: U256::from(3) }, + CountIncreased { newCount: U256::from(4) }, + ] + ); + assert_empty!(stream); Ok(()) } From 0ebee926d59be8ec210e2b4d799a9655d1760b3b Mon Sep 17 00:00:00 2001 From: Leo Date: Mon, 3 Nov 2025 18:58:33 +0900 Subject: [PATCH 080/122] fix: update ater merge --- src/robust_provider.rs | 15 ++--- tests/block_range_scanner.rs | 116 +++++++++++++---------------------- tests/live/reorg.rs | 2 +- 3 files changed, 50 insertions(+), 83 deletions(-) diff --git a/src/robust_provider.rs b/src/robust_provider.rs index 2f2cc6ad..421d6450 100644 --- a/src/robust_provider.rs +++ b/src/robust_provider.rs @@ -3,10 +3,10 @@ use std::{future::Future, sync::Arc, time::Duration}; use alloy::{ eips::{BlockId, BlockNumberOrTag}, network::Network, - providers::{Provider, RootProvider, ext::AnvilApi}, + providers::{Provider, RootProvider}, pubsub::Subscription, - rpc::types::{Filter, Log, anvil::ReorgOptions}, - transports::{RpcError, TransportErrorKind, TransportResult}, + rpc::types::{Filter, Log}, + transports::{RpcError, TransportErrorKind}, }; use backon::{ExponentialBuilder, Retryable}; use thiserror::Error; @@ -87,6 +87,11 @@ impl RobustProvider { self } + #[must_use] + pub fn inner(self) -> RootProvider { + self.provider + } + /// Add a fallback provider to the list. /// /// Fallback providers are used when the primary provider times out. @@ -138,10 +143,6 @@ impl RobustProvider { result } - pub async fn anvil_reorg(&self, options: ReorgOptions) -> TransportResult<()> { - self.provider.anvil_reorg(options).await - } - /// Fetch a block by hash with retry and timeout. 
/// /// # Errors diff --git a/tests/block_range_scanner.rs b/tests/block_range_scanner.rs index cecb860b..eb99faa5 100644 --- a/tests/block_range_scanner.rs +++ b/tests/block_range_scanner.rs @@ -1,13 +1,13 @@ use alloy::{ eips::{BlockId, BlockNumberOrTag}, - network::Ethereum, - providers::{ProviderBuilder, ext::AnvilApi}, + providers::{Provider, ProviderBuilder, ext::AnvilApi}, rpc::types::anvil::ReorgOptions, }; use alloy_node_bindings::Anvil; use event_scanner::{ ScannerError, ScannerStatus, assert_closed, assert_empty, assert_next, block_range_scanner::{BlockRangeScanner, Message}, + robust_provider::RobustProvider, }; use tokio_stream::StreamExt; @@ -15,11 +15,11 @@ use tokio_stream::StreamExt; async fn live_mode_processes_all_blocks_respecting_block_confirmations() -> anyhow::Result<()> { let anvil = Anvil::new().try_spawn()?; let provider = ProviderBuilder::new().connect(anvil.endpoint().as_str()).await?; + let robust_provider = RobustProvider::new(provider.root().to_owned()); // --- Zero block confirmations -> stream immediately --- - let client = - BlockRangeScanner::new().connect_ws::(anvil.ws_endpoint_url()).await?.run()?; + let client = BlockRangeScanner::new().connect(robust_provider).run()?; let mut stream = client.stream_live(0).await?; @@ -67,8 +67,9 @@ async fn stream_from_latest_starts_at_tip_not_confirmed() -> anyhow::Result<()> let block_confirmations = 5; - let client = - BlockRangeScanner::new().connect_ws::(anvil.ws_endpoint_url()).await?.run()?; + let robust_provider = RobustProvider::new(provider.root().to_owned()); + + let client = BlockRangeScanner::new().connect(robust_provider).run()?; let stream = client.stream_from(BlockNumberOrTag::Latest, block_confirmations).await?; @@ -100,8 +101,9 @@ async fn continuous_blocks_if_reorg_less_than_block_confirmation() -> anyhow::Re let block_confirmations = 5; - let client = - BlockRangeScanner::new().connect_ws::(anvil.ws_endpoint_url()).await?.run()?; + let robust_provider = RobustProvider::new(provider.root().to_owned()); + + let client = BlockRangeScanner::new().connect(robust_provider).run()?; let mut receiver = client.stream_live(block_confirmations).await?; @@ -142,8 +144,9 @@ async fn shallow_block_confirmation_does_not_mitigate_reorg() -> anyhow::Result< let block_confirmations = 3; - let client = - BlockRangeScanner::new().connect_ws::(anvil.ws_endpoint_url()).await?.run()?; + let robust_provider = RobustProvider::new(provider.root().to_owned()); + + let client = BlockRangeScanner::new().connect(robust_provider).run()?; let mut receiver = client.stream_live(block_confirmations).await?; @@ -207,11 +210,9 @@ async fn historical_emits_correction_range_when_reorg_below_end() -> anyhow::Res let end_num = 110; - let client = BlockRangeScanner::new() - .max_block_range(30) - .connect_ws::(anvil.ws_endpoint_url()) - .await? - .run()?; + let robust_provider = RobustProvider::new(provider.root().to_owned()); + + let client = BlockRangeScanner::new().max_block_range(30).connect(robust_provider).run()?; let mut stream = client .stream_historical(BlockNumberOrTag::Number(0), BlockNumberOrTag::Number(end_num)) @@ -242,11 +243,8 @@ async fn historical_emits_correction_range_when_end_num_reorgs() -> anyhow::Resu let end_num = 120; - let client = BlockRangeScanner::new() - .max_block_range(30) - .connect_ws::(anvil.ws_endpoint_url()) - .await? 
- .run()?; + let robust_provider = RobustProvider::new(provider.root().to_owned()); + let client = BlockRangeScanner::new().max_block_range(30).connect(robust_provider).run()?; let mut stream = client .stream_historical(BlockNumberOrTag::Number(0), BlockNumberOrTag::Number(end_num)) @@ -277,11 +275,9 @@ async fn historic_mode_respects_blocks_read_per_epoch() -> anyhow::Result<()> { provider.anvil_mine(Some(100), None).await?; - let client = BlockRangeScanner::new() - .max_block_range(5) - .connect_ws::(anvil.ws_endpoint_url()) - .await? - .run()?; + let robust_provider = RobustProvider::new(provider.root().to_owned()); + let client = + BlockRangeScanner::new().max_block_range(5).connect(robust_provider.clone()).run()?; // ranges where each batch is of max blocks per epoch size let mut stream = client.stream_historical(0, 19).await?; @@ -308,11 +304,7 @@ async fn historic_mode_respects_blocks_read_per_epoch() -> anyhow::Result<()> { assert_closed!(stream); // range where blocks per epoch is larger than the number of blocks on chain - let client = BlockRangeScanner::new() - .max_block_range(200) - .connect_ws::(anvil.ws_endpoint_url()) - .await? - .run()?; + let client = BlockRangeScanner::new().max_block_range(200).connect(robust_provider).run()?; let mut stream = client.stream_historical(0, 20).await?; assert_next!(stream, 0..=20); @@ -332,11 +324,8 @@ async fn historic_mode_normalises_start_and_end_block() -> anyhow::Result<()> { let provider = ProviderBuilder::new().connect(anvil.endpoint().as_str()).await?; provider.anvil_mine(Some(11), None).await?; - let client = BlockRangeScanner::new() - .max_block_range(5) - .connect_ws::(anvil.ws_endpoint_url()) - .await? - .run()?; + let robust_provider = RobustProvider::new(provider.root().to_owned()); + let client = BlockRangeScanner::new().max_block_range(5).connect(robust_provider).run()?; let mut stream = client.stream_historical(10, 0).await?; assert_next!(stream, 0..=4); @@ -355,11 +344,8 @@ async fn rewind_single_batch_when_epoch_larger_than_range() -> anyhow::Result<() provider.anvil_mine(Some(150), None).await?; - let client = BlockRangeScanner::new() - .max_block_range(100) - .connect_ws::(anvil.ws_endpoint_url()) - .await? - .run()?; + let robust_provider = RobustProvider::new(provider.root().to_owned()); + let client = BlockRangeScanner::new().max_block_range(100).connect(robust_provider).run()?; let mut stream = client.rewind(100, 150).await?; @@ -378,11 +364,8 @@ async fn rewind_exact_multiple_of_epoch_creates_full_batches_in_reverse() -> any provider.anvil_mine(Some(15), None).await?; - let client = BlockRangeScanner::new() - .max_block_range(5) - .connect_ws::(anvil.ws_endpoint_url()) - .await? - .run()?; + let robust_provider = RobustProvider::new(provider.root().to_owned()); + let client = BlockRangeScanner::new().max_block_range(5).connect(robust_provider).run()?; let mut stream = client.rewind(0, 14).await?; @@ -403,11 +386,8 @@ async fn rewind_with_remainder_trims_first_batch_to_stream_start() -> anyhow::Re provider.anvil_mine(Some(15), None).await?; - let client = BlockRangeScanner::new() - .max_block_range(4) - .connect_ws::(anvil.ws_endpoint_url()) - .await? 
- .run()?; + let robust_provider = RobustProvider::new(provider.root().to_owned()); + let client = BlockRangeScanner::new().max_block_range(4).connect(robust_provider).run()?; let mut stream = client.rewind(3, 12).await?; @@ -428,11 +408,8 @@ async fn rewind_single_block_range() -> anyhow::Result<()> { provider.anvil_mine(Some(15), None).await?; - let client = BlockRangeScanner::new() - .max_block_range(5) - .connect_ws::(anvil.ws_endpoint_url()) - .await? - .run()?; + let robust_provider = RobustProvider::new(provider.root().to_owned()); + let client = BlockRangeScanner::new().max_block_range(5).connect(robust_provider).run()?; let mut stream = client.rewind(7, 7).await?; @@ -450,11 +427,8 @@ async fn rewind_epoch_of_one_sends_each_block_in_reverse_order() -> anyhow::Resu provider.anvil_mine(Some(15), None).await?; - let client = BlockRangeScanner::new() - .max_block_range(1) - .connect_ws::(anvil.ws_endpoint_url()) - .await? - .run()?; + let robust_provider = RobustProvider::new(provider.root().to_owned()); + let client = BlockRangeScanner::new().max_block_range(1).connect(robust_provider).run()?; let mut stream = client.rewind(5, 8).await?; @@ -476,11 +450,8 @@ async fn command_rewind_defaults_latest_to_earliest_batches_correctly() -> anyho // Mine 20 blocks, so the total number of blocks is 21 (including 0th block) provider.anvil_mine(Some(20), None).await?; - let client = BlockRangeScanner::new() - .max_block_range(7) - .connect_ws::(anvil.ws_endpoint_url()) - .await? - .run()?; + let robust_provider = RobustProvider::new(provider.root().to_owned()); + let client = BlockRangeScanner::new().max_block_range(7).connect(robust_provider).run()?; let mut stream = client.rewind(BlockNumberOrTag::Earliest, BlockNumberOrTag::Latest).await?; @@ -500,11 +471,8 @@ async fn command_rewind_handles_start_and_end_in_any_order() -> anyhow::Result<( // Ensure blocks at 3 and 15 exist provider.anvil_mine(Some(16), None).await?; - let client = BlockRangeScanner::new() - .max_block_range(5) - .connect_ws::(anvil.ws_endpoint_url()) - .await? - .run()?; + let robust_provider = RobustProvider::new(provider.root().to_owned()); + let client = BlockRangeScanner::new().max_block_range(5).connect(robust_provider).run()?; let mut stream = client.rewind(15, 3).await?; @@ -528,11 +496,9 @@ async fn command_rewind_propagates_block_not_found_error() -> anyhow::Result<()> let anvil = Anvil::new().try_spawn()?; // Do not mine up to 999 so start won't exist - let client = BlockRangeScanner::new() - .max_block_range(5) - .connect_ws::(anvil.ws_endpoint_url()) - .await? 
- .run()?; + let provider = ProviderBuilder::new().connect(anvil.endpoint().as_str()).await?; + let robust_provider = RobustProvider::new(provider.root().to_owned()); + let client = BlockRangeScanner::new().max_block_range(5).connect(robust_provider).run()?; let stream = client.rewind(0, 999).await; diff --git a/tests/live/reorg.rs b/tests/live/reorg.rs index 17ebf360..cb2aa190 100644 --- a/tests/live/reorg.rs +++ b/tests/live/reorg.rs @@ -45,7 +45,7 @@ async fn reorg_rescans_events_within_same_block() -> anyhow::Result<()> { .map(|_| (TransactionData::JSON(contract.increase().into_transaction_request()), 0)) .collect(); - provider.anvil_reorg(ReorgOptions { depth: 4, tx_block_pairs }).await?; + provider.inner().anvil_reorg(ReorgOptions { depth: 4, tx_block_pairs }).await?; // assert expected messages post-reorg assert_next!(stream, ScannerStatus::ReorgDetected); From 55223e8ded3328474399b2ca13b4064ffde0766b Mon Sep 17 00:00:00 2001 From: Leo Date: Mon, 3 Nov 2025 21:05:59 +0900 Subject: [PATCH 081/122] ref: split common into differnet files --- tests/common/mod.rs | 238 +++------------------------------- tests/common/setup_scanner.rs | 150 +++++++++++++++++++++ tests/common/test_counter.rs | 79 +++++++++++ 3 files changed, 244 insertions(+), 223 deletions(-) create mode 100644 tests/common/setup_scanner.rs create mode 100644 tests/common/test_counter.rs diff --git a/tests/common/mod.rs b/tests/common/mod.rs index 19c4bf40..79bcc282 100644 --- a/tests/common/mod.rs +++ b/tests/common/mod.rs @@ -2,189 +2,26 @@ #![allow(clippy::missing_panics_doc)] #![allow(missing_docs)] +pub mod setup_scanner; +pub mod test_counter; + +pub(crate) use setup_scanner::{ + setup_common, setup_historic_scanner, setup_latest_scanner, setup_live_scanner, + setup_sync_from_latest_scanner, setup_sync_scanner, +}; +pub(crate) use test_counter::{TestCounter, TestCounterExt, deploy_counter}; + use std::sync::Arc; use alloy::{ eips::BlockNumberOrTag, network::Ethereum, - primitives::{FixedBytes, U256}, - providers::{Provider, ProviderBuilder, RootProvider, ext::AnvilApi}, + primitives::FixedBytes, + providers::{Provider, ProviderBuilder, ext::AnvilApi}, rpc::types::anvil::{ReorgOptions, TransactionData}, - sol, - sol_types::SolEvent, }; use alloy_node_bindings::{Anvil, AnvilInstance}; -use event_scanner::{ - EventFilter, EventScanner, EventScannerBuilder, Historic, LatestEvents, Live, Message, - SyncFromBlock, SyncFromLatestEvents, robust_provider::RobustProvider, test_utils::LogMetadata, -}; -use tokio_stream::wrappers::ReceiverStream; - -// Shared test contract used across integration tests -sol! 
{ - // Built directly with solc 0.8.30+commit.73712a01.Darwin.appleclang - #[sol(rpc, bytecode="608080604052346015576101b0908161001a8239f35b5f80fdfe6080806040526004361015610012575f80fd5b5f3560e01c90816306661abd1461016157508063a87d942c14610145578063d732d955146100ad5763e8927fbc14610048575f80fd5b346100a9575f3660031901126100a9575f5460018101809111610095576020817f7ca2ca9527391044455246730762df008a6b47bbdb5d37a890ef78394535c040925f55604051908152a1005b634e487b7160e01b5f52601160045260245ffd5b5f80fd5b346100a9575f3660031901126100a9575f548015610100575f198101908111610095576020817f53a71f16f53e57416424d0d18ccbd98504d42a6f98fe47b09772d8f357c620ce925f55604051908152a1005b60405162461bcd60e51b815260206004820152601860248201527f436f756e742063616e6e6f74206265206e6567617469766500000000000000006044820152606490fd5b346100a9575f3660031901126100a95760205f54604051908152f35b346100a9575f3660031901126100a9576020905f548152f3fea2646970667358221220471585b420a1ad0093820ff10129ec863f6df4bec186546249391fbc3cdbaa7c64736f6c634300081e0033")] - contract TestCounter { - uint256 public count; - - #[derive(Debug)] - event CountIncreased(uint256 newCount); - #[derive(Debug)] - event CountDecreased(uint256 newCount); - - function increase() public { - count += 1; - emit CountIncreased(count); - } - - function decrease() public { - require(count > 0, "Count cannot be negative"); - count -= 1; - emit CountDecreased(count); - } - - function getCount() public view returns (uint256) { - return count; - } - } -} - -pub struct ScannerSetup -where - P: Provider + Clone, -{ - pub provider: RootProvider, - pub contract: TestCounter::TestCounterInstance>, - pub scanner: S, - pub stream: ReceiverStream, - pub anvil: AnvilInstance, -} - -pub type LiveScannerSetup
<P>
= ScannerSetup<EventScanner<Live, P>, P>;
pub type HistoricScannerSetup<P> = ScannerSetup<EventScanner<Historic, P>, P>;
pub type SyncScannerSetup<P> = ScannerSetup<EventScanner<SyncFromBlock, P>, P>;
pub type SyncFromLatestScannerSetup<P> = ScannerSetup<EventScanner<SyncFromLatestEvents, P>, P>;
pub type LatestScannerSetup<P>
= ScannerSetup, P>; - -pub async fn setup_common( - block_interval: Option, - filter: Option, -) -> anyhow::Result<( - AnvilInstance, - RootProvider, - TestCounter::TestCounterInstance>, - EventFilter, -)> { - let anvil = spawn_anvil(block_interval)?; - let provider = build_provider(&anvil).await?; - let contract = deploy_counter(Arc::new(provider.clone())).await?; - - let default_filter = EventFilter::new() - .contract_address(*contract.address()) - .event(TestCounter::CountIncreased::SIGNATURE); - - let filter = filter.unwrap_or(default_filter); - - Ok((anvil, provider, contract, filter)) -} - -pub async fn setup_live_scanner( - block_interval: Option, - filter: Option, - confirmations: u64, -) -> anyhow::Result + Clone>> { - let (anvil, provider, contract, filter) = setup_common(block_interval, filter).await?; - - let robust_provider = RobustProvider::new(provider.root().clone()); - - let mut scanner = EventScannerBuilder::live() - .block_confirmations(confirmations) - .connect::(robust_provider); - - let stream = scanner.subscribe(filter); - - Ok(ScannerSetup { provider, contract, scanner, stream, anvil }) -} - -pub async fn setup_sync_scanner( - block_interval: Option, - filter: Option, - from: impl Into, - confirmations: u64, -) -> anyhow::Result + Clone>> { - let (anvil, provider, contract, filter) = setup_common(block_interval, filter).await?; - let robust_provider = RobustProvider::new(provider.root().clone()); - - let mut scanner = EventScannerBuilder::sync() - .from_block(from) - .block_confirmations(confirmations) - .connect::(robust_provider); - - let stream = scanner.subscribe(filter); - - Ok(ScannerSetup { provider, contract, scanner, stream, anvil }) -} - -pub async fn setup_sync_from_latest_scanner( - block_interval: Option, - filter: Option, - latest: usize, - confirmations: u64, -) -> anyhow::Result + Clone>> { - let (anvil, provider, contract, filter) = setup_common(block_interval, filter).await?; - let robust_provider = RobustProvider::new(provider.root().clone()); - - let mut scanner = EventScannerBuilder::sync() - .from_latest(latest) - .block_confirmations(confirmations) - .connect::(robust_provider); - - let stream = scanner.subscribe(filter); - - Ok(ScannerSetup { provider, contract, scanner, stream, anvil }) -} - -pub async fn setup_historic_scanner( - block_interval: Option, - filter: Option, - from: BlockNumberOrTag, - to: BlockNumberOrTag, -) -> anyhow::Result + Clone>> { - let (anvil, provider, contract, filter) = setup_common(block_interval, filter).await?; - let robust_provider = RobustProvider::new(provider.clone()); - let mut scanner = EventScannerBuilder::historic() - .from_block(from) - .to_block(to) - .connect::(robust_provider); - - let stream = scanner.subscribe(filter); - - Ok(ScannerSetup { provider, contract, scanner, stream, anvil }) -} - -pub async fn setup_latest_scanner( - block_interval: Option, - filter: Option, - count: usize, - from: Option, - to: Option, -) -> anyhow::Result + Clone>> { - let (anvil, provider, contract, filter) = setup_common(block_interval, filter).await?; - let mut builder = EventScannerBuilder::latest(count); - if let Some(f) = from { - builder = builder.from_block(f); - } - if let Some(t) = to { - builder = builder.to_block(t); - } - - let robust_provider = RobustProvider::new(provider.clone()); - let mut scanner = builder.connect::(robust_provider); - - let stream = scanner.subscribe(filter); - - Ok(ScannerSetup { provider, contract, scanner, stream, anvil }) -} +use event_scanner::robust_provider::RobustProvider; 
pub async fn reorg_with_new_count_incr_txs<P>
( anvil: &AnvilInstance, @@ -262,55 +99,10 @@ pub fn spawn_anvil(block_time: Option) -> anyhow::Result { Ok(anvil.try_spawn()?) } -pub async fn build_provider(anvil: &AnvilInstance) -> anyhow::Result { +pub async fn build_provider(anvil: &AnvilInstance) -> anyhow::Result> { let wallet = anvil.wallet().expect("anvil should return a default wallet"); let provider = ProviderBuilder::new().wallet(wallet).connect(anvil.ws_endpoint_url().as_str()).await?; - Ok(provider.root().to_owned()) -} - -pub async fn deploy_counter
<P>
(provider: P) -> anyhow::Result> -where - P: alloy::providers::Provider + Clone, -{ - let contract = TestCounter::deploy(provider).await?; - Ok(contract) -} - -#[allow(dead_code)] -pub(crate) trait TestCounterExt { - async fn increase_and_get_meta( - &self, - ) -> anyhow::Result>; - async fn decrease_and_get_meta( - &self, - ) -> anyhow::Result>; -} - -impl TestCounterExt for TestCounter::TestCounterInstance> { - async fn increase_and_get_meta( - &self, - ) -> anyhow::Result> { - let receipt = self.increase().send().await?.get_receipt().await?; - let tx_hash = receipt.transaction_hash; - let new_count = receipt.decoded_log::().unwrap().data.newCount; - Ok(LogMetadata { - event: TestCounter::CountIncreased { newCount: U256::from(new_count) }, - address: *self.address(), - tx_hash, - }) - } - - async fn decrease_and_get_meta( - &self, - ) -> anyhow::Result> { - let receipt = self.decrease().send().await?.get_receipt().await?; - let tx_hash = receipt.transaction_hash; - let new_count = receipt.decoded_log::().unwrap().data.newCount; - Ok(LogMetadata { - event: TestCounter::CountDecreased { newCount: U256::from(new_count) }, - address: *self.address(), - tx_hash, - }) - } + let robust_provider = RobustProvider::new(provider.root().to_owned()); + Ok(robust_provider) } diff --git a/tests/common/setup_scanner.rs b/tests/common/setup_scanner.rs new file mode 100644 index 00000000..35e5c77e --- /dev/null +++ b/tests/common/setup_scanner.rs @@ -0,0 +1,150 @@ +use std::sync::Arc; + +use alloy::{ + eips::BlockNumberOrTag, + network::Ethereum, + providers::{Provider, RootProvider}, + sol_types::SolEvent, +}; +use alloy_node_bindings::AnvilInstance; +use event_scanner::{ + EventFilter, EventScanner, EventScannerBuilder, Historic, LatestEvents, Live, Message, + SyncFromBlock, SyncFromLatestEvents, robust_provider::RobustProvider, +}; +use tokio_stream::wrappers::ReceiverStream; + +use crate::common::{ + TestCounter::{self, CountIncreased}, + build_provider, spawn_anvil, + test_counter::deploy_counter, +}; + +pub struct ScannerSetup +where + P: Provider + Clone, +{ + pub provider: RobustProvider, + pub contract: TestCounter::TestCounterInstance>, + pub scanner: S, + pub stream: ReceiverStream, + pub anvil: AnvilInstance, +} + +pub type LiveScannerSetup
<P>
= ScannerSetup<EventScanner<Live, P>, P>;
pub type HistoricScannerSetup<P> = ScannerSetup<EventScanner<Historic, P>, P>;
pub type SyncScannerSetup<P> = ScannerSetup<EventScanner<SyncFromBlock, P>, P>;
pub type SyncFromLatestScannerSetup<P> = ScannerSetup<EventScanner<SyncFromLatestEvents, P>, P>;
pub type LatestScannerSetup<P>
= ScannerSetup, P>; + +pub async fn setup_common( + block_interval: Option, + filter: Option, +) -> anyhow::Result<( + AnvilInstance, + RobustProvider, + TestCounter::TestCounterInstance>, + EventFilter, +)> { + let anvil = spawn_anvil(block_interval)?; + let provider = build_provider(&anvil).await?; + let contract = deploy_counter(Arc::new(provider.inner().clone())).await?; + + let default_filter = + EventFilter::new().contract_address(*contract.address()).event(CountIncreased::SIGNATURE); + + let filter = filter.unwrap_or(default_filter); + + Ok((anvil, provider, contract, filter)) +} + +pub async fn setup_live_scanner( + block_interval: Option, + filter: Option, + confirmations: u64, +) -> anyhow::Result + Clone>> { + let (anvil, provider, contract, filter) = setup_common(block_interval, filter).await?; + + let mut scanner = EventScannerBuilder::live() + .block_confirmations(confirmations) + .connect::(provider.clone()); + + let stream = scanner.subscribe(filter); + + Ok(ScannerSetup { provider, contract, scanner, stream, anvil }) +} + +pub async fn setup_sync_scanner( + block_interval: Option, + filter: Option, + from: impl Into, + confirmations: u64, +) -> anyhow::Result + Clone>> { + let (anvil, provider, contract, filter) = setup_common(block_interval, filter).await?; + + let mut scanner = EventScannerBuilder::sync() + .from_block(from) + .block_confirmations(confirmations) + .connect::(provider.clone()); + + let stream = scanner.subscribe(filter); + + Ok(ScannerSetup { provider, contract, scanner, stream, anvil }) +} + +pub async fn setup_sync_from_latest_scanner( + block_interval: Option, + filter: Option, + latest: usize, + confirmations: u64, +) -> anyhow::Result + Clone>> { + let (anvil, provider, contract, filter) = setup_common(block_interval, filter).await?; + + let mut scanner = EventScannerBuilder::sync() + .from_latest(latest) + .block_confirmations(confirmations) + .connect::(provider.clone()); + + let stream = scanner.subscribe(filter); + + Ok(ScannerSetup { provider, contract, scanner, stream, anvil }) +} + +pub async fn setup_historic_scanner( + block_interval: Option, + filter: Option, + from: BlockNumberOrTag, + to: BlockNumberOrTag, +) -> anyhow::Result + Clone>> { + let (anvil, provider, contract, filter) = setup_common(block_interval, filter).await?; + let mut scanner = EventScannerBuilder::historic() + .from_block(from) + .to_block(to) + .connect::(provider.clone()); + + let stream = scanner.subscribe(filter); + + Ok(ScannerSetup { provider, contract, scanner, stream, anvil }) +} + +pub async fn setup_latest_scanner( + block_interval: Option, + filter: Option, + count: usize, + from: Option, + to: Option, +) -> anyhow::Result + Clone>> { + let (anvil, provider, contract, filter) = setup_common(block_interval, filter).await?; + let mut builder = EventScannerBuilder::latest(count); + if let Some(f) = from { + builder = builder.from_block(f); + } + if let Some(t) = to { + builder = builder.to_block(t); + } + + let mut scanner = builder.connect::(provider.clone()); + + let stream = scanner.subscribe(filter); + + Ok(ScannerSetup { provider, contract, scanner, stream, anvil }) +} diff --git a/tests/common/test_counter.rs b/tests/common/test_counter.rs new file mode 100644 index 00000000..155ce9a3 --- /dev/null +++ b/tests/common/test_counter.rs @@ -0,0 +1,79 @@ +use std::sync::Arc; + +use alloy::{network::Ethereum, primitives::U256, providers::Provider, sol}; +use event_scanner::test_utils::LogMetadata; + +// Shared test contract used across integration tests +sol! 
{ + // Built directly with solc 0.8.30+commit.73712a01.Darwin.appleclang + #[sol(rpc, bytecode="608080604052346015576101b0908161001a8239f35b5f80fdfe6080806040526004361015610012575f80fd5b5f3560e01c90816306661abd1461016157508063a87d942c14610145578063d732d955146100ad5763e8927fbc14610048575f80fd5b346100a9575f3660031901126100a9575f5460018101809111610095576020817f7ca2ca9527391044455246730762df008a6b47bbdb5d37a890ef78394535c040925f55604051908152a1005b634e487b7160e01b5f52601160045260245ffd5b5f80fd5b346100a9575f3660031901126100a9575f548015610100575f198101908111610095576020817f53a71f16f53e57416424d0d18ccbd98504d42a6f98fe47b09772d8f357c620ce925f55604051908152a1005b60405162461bcd60e51b815260206004820152601860248201527f436f756e742063616e6e6f74206265206e6567617469766500000000000000006044820152606490fd5b346100a9575f3660031901126100a95760205f54604051908152f35b346100a9575f3660031901126100a9576020905f548152f3fea2646970667358221220471585b420a1ad0093820ff10129ec863f6df4bec186546249391fbc3cdbaa7c64736f6c634300081e0033")] + contract TestCounter { + uint256 public count; + + #[derive(Debug)] + event CountIncreased(uint256 newCount); + #[derive(Debug)] + event CountDecreased(uint256 newCount); + + function increase() public { + count += 1; + emit CountIncreased(count); + } + + function decrease() public { + require(count > 0, "Count cannot be negative"); + count -= 1; + emit CountDecreased(count); + } + + function getCount() public view returns (uint256) { + return count; + } + } +} + +pub async fn deploy_counter
<P>
(provider: P) -> anyhow::Result> +where + P: alloy::providers::Provider + Clone, +{ + let contract = TestCounter::deploy(provider).await?; + Ok(contract) +} + +#[allow(dead_code)] +pub(crate) trait TestCounterExt { + async fn increase_and_get_meta( + &self, + ) -> anyhow::Result>; + async fn decrease_and_get_meta( + &self, + ) -> anyhow::Result>; +} + +impl TestCounterExt for TestCounter::TestCounterInstance> { + async fn increase_and_get_meta( + &self, + ) -> anyhow::Result> { + let receipt = self.increase().send().await?.get_receipt().await?; + let tx_hash = receipt.transaction_hash; + let new_count = receipt.decoded_log::().unwrap().data.newCount; + Ok(LogMetadata { + event: TestCounter::CountIncreased { newCount: U256::from(new_count) }, + address: *self.address(), + tx_hash, + }) + } + + async fn decrease_and_get_meta( + &self, + ) -> anyhow::Result> { + let receipt = self.decrease().send().await?.get_receipt().await?; + let tx_hash = receipt.transaction_hash; + let new_count = receipt.decoded_log::().unwrap().data.newCount; + Ok(LogMetadata { + event: TestCounter::CountDecreased { newCount: U256::from(new_count) }, + address: *self.address(), + tx_hash, + }) + } +} From 4fe722a46e06e479165eafc9aad428c067c5173e Mon Sep 17 00:00:00 2001 From: Leo Date: Mon, 3 Nov 2025 21:06:24 +0900 Subject: [PATCH 082/122] feat: add inner --- src/robust_provider.rs | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/robust_provider.rs b/src/robust_provider.rs index 421d6450..154c3016 100644 --- a/src/robust_provider.rs +++ b/src/robust_provider.rs @@ -87,9 +87,10 @@ impl RobustProvider { self } + /// Get a reference to the inner provider #[must_use] - pub fn inner(self) -> RootProvider { - self.provider + pub fn inner(&self) -> &RootProvider { + &self.provider } /// Add a fallback provider to the list. 
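With this change, `RobustProvider::inner()` borrows the wrapped `RootProvider` instead of consuming the wrapper, which is what lets the tests in the following patches call helpers such as `provider.inner().anvil_mine(...)` repeatedly on the same instance. A minimal sketch of the call pattern (the `peek_tip` helper and endpoint URL are illustrative, not part of the patch; only `RobustProvider::new` and `inner` come from this crate):

```rust
use alloy::providers::{Provider, ProviderBuilder};
use event_scanner::robust_provider::RobustProvider;

async fn peek_tip() -> anyhow::Result<()> {
    let provider = ProviderBuilder::new().connect("ws://localhost:8545").await?;
    let robust_provider = RobustProvider::new(provider.root().to_owned());

    // `inner()` returns a reference, so the wrapper stays usable
    // after the call instead of being moved out of.
    let tip = robust_provider.inner().get_block_number().await?;
    println!("current tip: {tip}");

    Ok(())
}
```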
From 0160914d9dbe0b065be03b6096a2a9e4f8e7b10b Mon Sep 17 00:00:00 2001 From: Leo Date: Mon, 3 Nov 2025 21:06:38 +0900 Subject: [PATCH 083/122] ref: use inner --- tests/sync/from_block.rs | 4 ++-- tests/sync/from_latest.rs | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/tests/sync/from_block.rs b/tests/sync/from_block.rs index b692150b..12358786 100644 --- a/tests/sync/from_block.rs +++ b/tests/sync/from_block.rs @@ -84,7 +84,7 @@ async fn block_confirmations_mitigate_reorgs_historic_to_live() -> anyhow::Resul let provider = setup.provider.clone(); let contract = setup.contract.clone(); - provider.anvil_mine(Some(10), None).await?; + provider.inner().anvil_mine(Some(10), None).await?; let scanner = setup.scanner; let mut stream = setup.stream; @@ -107,7 +107,7 @@ async fn block_confirmations_mitigate_reorgs_historic_to_live() -> anyhow::Resul ) .await?; - provider.anvil_mine(Some(10), None).await?; + provider.inner().anvil_mine(Some(10), None).await?; let observed_tx_hashes = Arc::new(Mutex::new(Vec::new())); let observed_tx_hashes_clone = Arc::clone(&observed_tx_hashes); diff --git a/tests/sync/from_latest.rs b/tests/sync/from_latest.rs index 069cb5f4..f0d8eb5f 100644 --- a/tests/sync/from_latest.rs +++ b/tests/sync/from_latest.rs @@ -126,12 +126,12 @@ async fn scan_latest_then_live_boundary_no_duplication() -> anyhow::Result<()> { let mut expected_latest = vec![]; expected_latest.push(contract.increase_and_get_meta().await?); - provider.anvil_mine(Some(1), None).await?; + provider.inner().anvil_mine(Some(1), None).await?; expected_latest.push(contract.increase_and_get_meta().await?); expected_latest.push(contract.increase_and_get_meta().await?); - provider.anvil_mine(Some(1), None).await?; + provider.inner().anvil_mine(Some(1), None).await?; scanner.start().await?; From a0aa387ac10339d1ea6c17a8e0ef4a11852d840c Mon Sep 17 00:00:00 2001 From: Leo Date: Mon, 3 Nov 2025 21:06:58 +0900 Subject: [PATCH 084/122] ref: use inner in latest_event --- tests/latest_events/basic.rs | 22 +++++++++------------- 1 file changed, 9 insertions(+), 13 deletions(-) diff --git a/tests/latest_events/basic.rs b/tests/latest_events/basic.rs index ea0d08fa..17727d2d 100644 --- a/tests/latest_events/basic.rs +++ b/tests/latest_events/basic.rs @@ -11,8 +11,7 @@ use crate::common::{ TestCounter, TestCounterExt, deploy_counter, setup_common, setup_latest_scanner, }; use event_scanner::{ - EventFilter, EventScannerBuilder, assert_closed, assert_next, robust_provider::RobustProvider, - test_utils::LogMetadata, + EventFilter, EventScannerBuilder, assert_closed, assert_next, test_utils::LogMetadata, }; #[tokio::test] @@ -98,19 +97,17 @@ async fn latest_scanner_respects_range_subset() -> anyhow::Result<()> { expected.push(contract.increase_and_get_meta().await?); // manual empty block minting - provider.anvil_mine(Some(2), None).await?; + provider.inner().anvil_mine(Some(2), None).await?; let head = provider.get_block_number().await?; // Choose a subrange covering last 4 blocks let start = BlockNumberOrTag::from(head - 3); let end = BlockNumberOrTag::from(head); - let robust_provider = RobustProvider::new(provider.root().clone()); - let mut scanner_with_range = EventScannerBuilder::latest(10) .from_block(start) .to_block(end) - .connect::(robust_provider); + .connect::(provider); let mut stream_with_range = scanner_with_range.subscribe(default_filter); scanner_with_range.start().await?; @@ -257,8 +254,8 @@ async fn latest_scanner_cross_contract_filtering() -> anyhow::Result<()> { let provider = 
setup.provider; let mut scanner = setup.scanner; - let contract_a = deploy_counter(Arc::new(provider.clone())).await?; - let contract_b = deploy_counter(Arc::new(provider.clone())).await?; + let contract_a = deploy_counter(Arc::new(provider.inner())).await?; + let contract_b = deploy_counter(Arc::new(provider.inner())).await?; // Listener only for contract A CountIncreased let filter_a = EventFilter::new() @@ -294,7 +291,7 @@ async fn latest_scanner_large_gaps_and_empty_ranges() -> anyhow::Result<()> { log_meta.push(contract.increase_and_get_meta().await?); // Mine 10 empty blocks - provider.anvil_mine(Some(10), None).await?; + provider.inner().anvil_mine(Some(10), None).await?; // Emit 1 more event log_meta.push(contract.increase_and_get_meta().await?); @@ -302,11 +299,10 @@ async fn latest_scanner_large_gaps_and_empty_ranges() -> anyhow::Result<()> { let start = BlockNumberOrTag::from(head - 12); let end = BlockNumberOrTag::from(head); - let robust_provider = RobustProvider::new(provider.root().clone()); let mut scanner_with_range = EventScannerBuilder::latest(5) .from_block(start) .to_block(end) - .connect::(robust_provider); + .connect::(provider); let mut stream_with_range = scanner_with_range.subscribe(default_filter); scanner_with_range.start().await?; @@ -328,6 +324,7 @@ async fn latest_scanner_boundary_range_single_block() -> anyhow::Result<()> { // Pick the expected tx's block number as the block range let expected_tx_hash = expected[0].tx_hash; let start = provider + .inner() .get_transaction_by_hash(expected_tx_hash) .await? .map(|t| t.block_number.unwrap()) @@ -335,11 +332,10 @@ async fn latest_scanner_boundary_range_single_block() -> anyhow::Result<()> { .unwrap(); let end = start; - let robust_provider = RobustProvider::new(provider.root().clone()); let mut scanner_with_range = EventScannerBuilder::latest(5) .from_block(start) .to_block(end) - .connect::(robust_provider); + .connect::(provider); let mut stream_with_range = scanner_with_range.subscribe(default_filter); scanner_with_range.start().await?; From c9c7e2add39bc11b503f11437cfb1808452c488a Mon Sep 17 00:00:00 2001 From: Leo Date: Mon, 3 Nov 2025 21:12:13 +0900 Subject: [PATCH 085/122] fix: broken block range scanner tests --- tests/block_range_scanner.rs | 26 +++++++++++++------------- tests/live/basic.rs | 2 +- 2 files changed, 14 insertions(+), 14 deletions(-) diff --git a/tests/block_range_scanner.rs b/tests/block_range_scanner.rs index eb99faa5..bc461377 100644 --- a/tests/block_range_scanner.rs +++ b/tests/block_range_scanner.rs @@ -14,16 +14,16 @@ use tokio_stream::StreamExt; #[tokio::test] async fn live_mode_processes_all_blocks_respecting_block_confirmations() -> anyhow::Result<()> { let anvil = Anvil::new().try_spawn()?; - let provider = ProviderBuilder::new().connect(anvil.endpoint().as_str()).await?; + let provider = ProviderBuilder::new().connect(anvil.ws_endpoint().as_str()).await?; let robust_provider = RobustProvider::new(provider.root().to_owned()); // --- Zero block confirmations -> stream immediately --- - let client = BlockRangeScanner::new().connect(robust_provider).run()?; + let client = BlockRangeScanner::new().connect(robust_provider.clone()).run()?; let mut stream = client.stream_live(0).await?; - provider.anvil_mine(Some(5), None).await?; + robust_provider.inner().anvil_mine(Some(5), None).await?; assert_next!(stream, 1..=1); assert_next!(stream, 2..=2); @@ -32,7 +32,7 @@ async fn live_mode_processes_all_blocks_respecting_block_confirmations() -> anyh assert_next!(stream, 5..=5); let mut 
stream = assert_empty!(stream); - provider.anvil_mine(Some(1), None).await?; + robust_provider.inner().anvil_mine(Some(1), None).await?; assert_next!(stream, 6..=6); assert_empty!(stream); @@ -41,7 +41,7 @@ async fn live_mode_processes_all_blocks_respecting_block_confirmations() -> anyh let mut stream = client.stream_live(1).await?; - provider.anvil_mine(Some(5), None).await?; + robust_provider.inner().anvil_mine(Some(5), None).await?; assert_next!(stream, 6..=6); assert_next!(stream, 7..=7); @@ -50,7 +50,7 @@ async fn live_mode_processes_all_blocks_respecting_block_confirmations() -> anyh assert_next!(stream, 10..=10); let mut stream = assert_empty!(stream); - provider.anvil_mine(Some(1), None).await?; + robust_provider.inner().anvil_mine(Some(1), None).await?; assert_next!(stream, 11..=11); assert_empty!(stream); @@ -62,29 +62,29 @@ async fn live_mode_processes_all_blocks_respecting_block_confirmations() -> anyh async fn stream_from_latest_starts_at_tip_not_confirmed() -> anyhow::Result<()> { let anvil = Anvil::new().try_spawn()?; - let provider = ProviderBuilder::new().connect(anvil.endpoint().as_str()).await?; + let provider = ProviderBuilder::new().connect(anvil.ws_endpoint().as_str()).await?; provider.anvil_mine(Some(20), None).await?; let block_confirmations = 5; let robust_provider = RobustProvider::new(provider.root().to_owned()); - let client = BlockRangeScanner::new().connect(robust_provider).run()?; + let client = BlockRangeScanner::new().connect(robust_provider.clone()).run()?; let stream = client.stream_from(BlockNumberOrTag::Latest, block_confirmations).await?; let stream = assert_empty!(stream); - provider.anvil_mine(Some(4), None).await?; + robust_provider.inner().anvil_mine(Some(4), None).await?; let mut stream = assert_empty!(stream); - provider.anvil_mine(Some(1), None).await?; + robust_provider.inner().anvil_mine(Some(1), None).await?; assert_next!(stream, 20..=20); let mut stream = assert_empty!(stream); - provider.anvil_mine(Some(1), None).await?; + robust_provider.inner().anvil_mine(Some(1), None).await?; assert_next!(stream, 21..=21); assert_empty!(stream); @@ -97,7 +97,7 @@ async fn stream_from_latest_starts_at_tip_not_confirmed() -> anyhow::Result<()> async fn continuous_blocks_if_reorg_less_than_block_confirmation() -> anyhow::Result<()> { let anvil = Anvil::new().try_spawn()?; - let provider = ProviderBuilder::new().connect(anvil.endpoint().as_str()).await?; + let provider = ProviderBuilder::new().connect(anvil.ws_endpoint().as_str()).await?; let block_confirmations = 5; @@ -140,7 +140,7 @@ async fn continuous_blocks_if_reorg_less_than_block_confirmation() -> anyhow::Re async fn shallow_block_confirmation_does_not_mitigate_reorg() -> anyhow::Result<()> { let anvil = Anvil::new().block_time(1).try_spawn()?; - let provider = ProviderBuilder::new().connect(anvil.endpoint().as_str()).await?; + let provider = ProviderBuilder::new().connect(anvil.ws_endpoint().as_str()).await?; let block_confirmations = 3; diff --git a/tests/live/basic.rs b/tests/live/basic.rs index 2b1964cd..99627b99 100644 --- a/tests/live/basic.rs +++ b/tests/live/basic.rs @@ -65,7 +65,7 @@ async fn multiple_contracts_same_event_isolate_callbacks() -> anyhow::Result<()> let setup = setup_live_scanner(Some(0.1), None, 0).await?; let provider = setup.provider.clone(); let a = setup.contract.clone(); - let b = deploy_counter(Arc::new(provider.clone())).await?; + let b = deploy_counter(Arc::new(provider.inner())).await?; let a_filter = EventFilter::new() .contract_address(*a.address()) From 
acbe4899ee13a8c3e80ac4cc5707ec19e678f456 Mon Sep 17 00:00:00 2001 From: Leo Date: Mon, 3 Nov 2025 21:23:16 +0900 Subject: [PATCH 086/122] fix: reorg test --- tests/live/reorg.rs | 165 +++++++++++++++----------------------------- 1 file changed, 55 insertions(+), 110 deletions(-) diff --git a/tests/live/reorg.rs b/tests/live/reorg.rs index cb2aa190..565982e9 100644 --- a/tests/live/reorg.rs +++ b/tests/live/reorg.rs @@ -1,27 +1,23 @@ -use anyhow::Ok; use std::{sync::Arc, time::Duration}; use tokio_stream::StreamExt; +use anyhow::Ok; use tokio::{sync::Mutex, time::timeout}; use crate::common::{ - LiveScannerSetup, TestCounter::CountIncreased, reorg_with_new_count_incr_txs, setup_common, - setup_live_scanner, + TestCounter::CountIncreased, reorg_with_new_count_incr_txs, setup_common, setup_live_scanner, + setup_scanner::LiveScannerSetup, }; use alloy::{ primitives::U256, - providers::{Provider, ext::AnvilApi}, + providers::ext::AnvilApi, rpc::types::anvil::{ReorgOptions, TransactionData}, }; -use event_scanner::{ - EventScannerBuilder, Message, ScannerStatus, assert_empty, assert_next, - robust_provider::RobustProvider, -}; +use event_scanner::{EventScannerBuilder, Message, ScannerStatus, assert_empty, assert_next}; #[tokio::test] async fn reorg_rescans_events_within_same_block() -> anyhow::Result<()> { let (_anvil, provider, contract, filter) = setup_common(None, None).await?; - let provider = RobustProvider::new(provider.root().clone()); let mut scanner = EventScannerBuilder::live().block_confirmations(0).connect(provider.clone()); let mut stream = scanner.subscribe(filter); @@ -64,125 +60,74 @@ async fn reorg_rescans_events_within_same_block() -> anyhow::Result<()> { #[tokio::test] async fn reorg_rescans_events_with_ascending_blocks() -> anyhow::Result<()> { - let LiveScannerSetup { provider: _provider, contract, scanner, mut stream, anvil } = - setup_live_scanner(Option::Some(0.1), Option::None, 0).await?; + let (_anvil, provider, contract, filter) = setup_common(None, None).await?; + let mut scanner = EventScannerBuilder::live().block_confirmations(0).connect(provider.clone()); + let mut stream = scanner.subscribe(filter); scanner.start().await?; - let num_initial_events = 5; - - let reorg_depth = 5; - let num_new_events = 3; - // add events in ascending blocks from reorg point - let same_block = false; - - let expected_event_tx_hashes = reorg_with_new_count_incr_txs( - &anvil, - contract, - num_initial_events, - num_new_events, - reorg_depth, - same_block, - ) - .await?; - - let event_block_count = Arc::new(Mutex::new(Vec::new())); - let event_block_count_clone = Arc::clone(&event_block_count); + // emit initial events + for _ in 0..5 { + contract.increase().send().await?.watch().await?; + } - let reorg_detected = Arc::new(Mutex::new(false)); - let reorg_detected_clone = reorg_detected.clone(); + // assert initial events are emitted as expected + assert_next!(stream, &[CountIncreased { newCount: U256::from(1) }]); + assert_next!(stream, &[CountIncreased { newCount: U256::from(2) }]); + assert_next!(stream, &[CountIncreased { newCount: U256::from(3) }]); + assert_next!(stream, &[CountIncreased { newCount: U256::from(4) }]); + assert_next!(stream, &[CountIncreased { newCount: U256::from(5) }]); + let mut stream = assert_empty!(stream); - let event_counting = async move { - while let Some(message) = stream.next().await { - match message { - Message::Data(logs) => { - let mut guard = event_block_count_clone.lock().await; - for log in logs { - if let Some(n) = log.transaction_hash { - 
guard.push(n); - } - } - } - Message::Error(e) => { - panic!("panic with error {e}"); - } - Message::Status(status) => { - if matches!(status, ScannerStatus::ReorgDetected) { - *reorg_detected_clone.lock().await = true; - } - } - } - } - }; + // reorg the chain - new events in ascending blocks + let tx_block_pairs = vec![ + (TransactionData::JSON(contract.increase().into_transaction_request()), 0), + (TransactionData::JSON(contract.increase().into_transaction_request()), 1), + (TransactionData::JSON(contract.increase().into_transaction_request()), 2), + ]; - let _ = timeout(Duration::from_secs(10), event_counting).await; + provider.inner().anvil_reorg(ReorgOptions { depth: 4, tx_block_pairs }).await?; - let final_blocks: Vec<_> = event_block_count.lock().await.clone(); - assert!(*reorg_detected.lock().await); - assert_eq!(final_blocks.len() as u64, num_initial_events + num_new_events); - assert_eq!(final_blocks, expected_event_tx_hashes); + // assert expected messages post-reorg + assert_next!(stream, ScannerStatus::ReorgDetected); + assert_next!(stream, &[CountIncreased { newCount: U256::from(2) }]); + assert_next!(stream, &[CountIncreased { newCount: U256::from(3) }]); + assert_next!(stream, &[CountIncreased { newCount: U256::from(4) }]); + assert_empty!(stream); Ok(()) } #[tokio::test] async fn reorg_depth_one() -> anyhow::Result<()> { - let LiveScannerSetup { provider: _provider, contract, scanner, mut stream, anvil } = - setup_live_scanner(Option::Some(0.1), Option::None, 0).await?; + let (_anvil, provider, contract, filter) = setup_common(None, None).await?; + let mut scanner = EventScannerBuilder::live().block_confirmations(0).connect(provider.clone()); + let mut stream = scanner.subscribe(filter); scanner.start().await?; - let num_initial_events = 4; - - let reorg_depth = 1; - let num_new_events = 1; - let same_block = true; - - let expected_event_tx_hashes = reorg_with_new_count_incr_txs( - &anvil, - contract, - num_initial_events, - num_new_events, - reorg_depth, - same_block, - ) - .await?; - - let event_block_count = Arc::new(Mutex::new(Vec::new())); - let event_block_count_clone = Arc::clone(&event_block_count); + // emit initial events + for _ in 0..4 { + contract.increase().send().await?.watch().await?; + } - let reorg_detected = Arc::new(Mutex::new(false)); - let reorg_detected_clone = reorg_detected.clone(); + // assert initial events are emitted as expected + assert_next!(stream, &[CountIncreased { newCount: U256::from(1) }]); + assert_next!(stream, &[CountIncreased { newCount: U256::from(2) }]); + assert_next!(stream, &[CountIncreased { newCount: U256::from(3) }]); + assert_next!(stream, &[CountIncreased { newCount: U256::from(4) }]); + let mut stream = assert_empty!(stream); - let event_counting = async move { - while let Some(message) = stream.next().await { - match message { - Message::Data(logs) => { - let mut guard = event_block_count_clone.lock().await; - for log in logs { - if let Some(n) = log.transaction_hash { - guard.push(n); - } - } - } - Message::Error(e) => { - panic!("panic with error {e}"); - } - Message::Status(info) => { - if matches!(info, ScannerStatus::ReorgDetected) { - *reorg_detected_clone.lock().await = true; - } - } - } - } - }; + // reorg the chain with depth 1 + let tx_block_pairs = + vec![(TransactionData::JSON(contract.increase().into_transaction_request()), 0)]; - _ = timeout(Duration::from_secs(5), event_counting).await; + provider.inner().anvil_reorg(ReorgOptions { depth: 1, tx_block_pairs }).await?; - let final_blocks: Vec<_> = 
event_block_count.lock().await.clone(); - assert!(*reorg_detected.lock().await); - assert_eq!(final_blocks.len() as u64, num_initial_events + num_new_events); - assert_eq!(final_blocks, expected_event_tx_hashes); + // assert expected messages post-reorg + assert_next!(stream, ScannerStatus::ReorgDetected); + assert_next!(stream, &[CountIncreased { newCount: U256::from(4) }]); + assert_empty!(stream); Ok(()) } @@ -258,7 +203,7 @@ async fn block_confirmations_mitigate_reorgs() -> anyhow::Result<()> { scanner.start().await?; - provider.anvil_mine(Some(10), None).await?; + provider.inner().anvil_mine(Some(10), None).await?; let num_initial_events = 4_u64; let num_new_events = 2_u64; @@ -276,7 +221,7 @@ async fn block_confirmations_mitigate_reorgs() -> anyhow::Result<()> { ) .await?; - provider.anvil_mine(Some(10), None).await?; + provider.inner().anvil_mine(Some(10), None).await?; let observed_tx_hashes = Arc::new(Mutex::new(Vec::new())); let observed_tx_hashes_clone = Arc::clone(&observed_tx_hashes); From 00894e36d8545a7a17a14ca3fb27c74ede5f881e Mon Sep 17 00:00:00 2001 From: Leo Date: Mon, 3 Nov 2025 21:31:56 +0900 Subject: [PATCH 087/122] ref: update reorg two test --- tests/live/reorg.rs | 78 +++++++++++++++------------------------------ 1 file changed, 26 insertions(+), 52 deletions(-) diff --git a/tests/live/reorg.rs b/tests/live/reorg.rs index 565982e9..c2843a7d 100644 --- a/tests/live/reorg.rs +++ b/tests/live/reorg.rs @@ -18,7 +18,7 @@ use event_scanner::{EventScannerBuilder, Message, ScannerStatus, assert_empty, a #[tokio::test] async fn reorg_rescans_events_within_same_block() -> anyhow::Result<()> { let (_anvil, provider, contract, filter) = setup_common(None, None).await?; - let mut scanner = EventScannerBuilder::live().block_confirmations(0).connect(provider.clone()); + let mut scanner = EventScannerBuilder::live().connect(provider.clone()); let mut stream = scanner.subscribe(filter); scanner.start().await?; @@ -61,7 +61,7 @@ async fn reorg_rescans_events_within_same_block() -> anyhow::Result<()> { #[tokio::test] async fn reorg_rescans_events_with_ascending_blocks() -> anyhow::Result<()> { let (_anvil, provider, contract, filter) = setup_common(None, None).await?; - let mut scanner = EventScannerBuilder::live().block_confirmations(0).connect(provider.clone()); + let mut scanner = EventScannerBuilder::live().connect(provider.clone()); let mut stream = scanner.subscribe(filter); scanner.start().await?; @@ -101,7 +101,7 @@ async fn reorg_rescans_events_with_ascending_blocks() -> anyhow::Result<()> { #[tokio::test] async fn reorg_depth_one() -> anyhow::Result<()> { let (_anvil, provider, contract, filter) = setup_common(None, None).await?; - let mut scanner = EventScannerBuilder::live().block_confirmations(0).connect(provider.clone()); + let mut scanner = EventScannerBuilder::live().connect(provider.clone()); let mut stream = scanner.subscribe(filter); scanner.start().await?; @@ -134,62 +134,36 @@ async fn reorg_depth_one() -> anyhow::Result<()> { #[tokio::test] async fn reorg_depth_two() -> anyhow::Result<()> { - let LiveScannerSetup { provider: _provider, contract, scanner, mut stream, anvil } = - setup_live_scanner(Option::Some(0.1), Option::None, 0).await?; + let (_anvil, provider, contract, filter) = setup_common(None, None).await?; + let mut scanner = EventScannerBuilder::live().block_confirmations(0).connect(provider.clone()); + let mut stream = scanner.subscribe(filter); scanner.start().await?; - let num_initial_events = 4; - - let num_new_events = 1; - let reorg_depth = 2; - 
- let same_block = true; - let expected_event_tx_hashes = reorg_with_new_count_incr_txs( - &anvil, - contract, - num_initial_events, - num_new_events, - reorg_depth, - same_block, - ) - .await?; - - let event_block_count = Arc::new(Mutex::new(Vec::new())); - let event_block_count_clone = Arc::clone(&event_block_count); + // emit initial events + for _ in 0..4 { + contract.increase().send().await?.watch().await?; + } - let reorg_detected = Arc::new(Mutex::new(false)); - let reorg_detected_clone = reorg_detected.clone(); + // assert initial events are emitted as expected + assert_next!(stream, &[CountIncreased { newCount: U256::from(1) }]); + assert_next!(stream, &[CountIncreased { newCount: U256::from(2) }]); + assert_next!(stream, &[CountIncreased { newCount: U256::from(3) }]); + assert_next!(stream, &[CountIncreased { newCount: U256::from(4) }]); + let mut stream = assert_empty!(stream); - let event_counting = async move { - while let Some(message) = stream.next().await { - match message { - Message::Data(logs) => { - let mut guard = event_block_count_clone.lock().await; - for log in logs { - if let Some(n) = log.transaction_hash { - guard.push(n); - } - } - } - Message::Error(e) => { - panic!("panic with error {e}"); - } - Message::Status(info) => { - if matches!(info, ScannerStatus::ReorgDetected) { - *reorg_detected_clone.lock().await = true; - } - } - } - } - }; + // reorg the chain with depth 2 + let tx_block_pairs = + vec![(TransactionData::JSON(contract.increase().into_transaction_request()), 0)]; - _ = timeout(Duration::from_secs(5), event_counting).await; + provider.inner().anvil_reorg(ReorgOptions { depth: 2, tx_block_pairs }).await?; - let final_blocks: Vec<_> = event_block_count.lock().await.clone(); - assert!(*reorg_detected.lock().await); - assert_eq!(final_blocks.len() as u64, num_initial_events + num_new_events); - assert_eq!(final_blocks, expected_event_tx_hashes); + // assert expected messages post-reorg + // After reorg depth 2, we rolled back events 3 and 4, so counter is at 2 + // The new event will increment from 2 to 3 + assert_next!(stream, ScannerStatus::ReorgDetected); + assert_next!(stream, &[CountIncreased { newCount: U256::from(3) }]); + assert_empty!(stream); Ok(()) } From 2221c2f9482b6324b53def7c7144037ade9b8bf2 Mon Sep 17 00:00:00 2001 From: Leo Date: Mon, 3 Nov 2025 21:39:25 +0900 Subject: [PATCH 088/122] ref: refactor all reorg tests --- tests/live/reorg.rs | 137 ++++++++++++--------------------------- tests/sync/from_block.rs | 131 ++++++++++++------------------------- 2 files changed, 81 insertions(+), 187 deletions(-) diff --git a/tests/live/reorg.rs b/tests/live/reorg.rs index c2843a7d..08154da6 100644 --- a/tests/live/reorg.rs +++ b/tests/live/reorg.rs @@ -1,19 +1,12 @@ -use std::{sync::Arc, time::Duration}; -use tokio_stream::StreamExt; - use anyhow::Ok; -use tokio::{sync::Mutex, time::timeout}; -use crate::common::{ - TestCounter::CountIncreased, reorg_with_new_count_incr_txs, setup_common, setup_live_scanner, - setup_scanner::LiveScannerSetup, -}; +use crate::common::{TestCounter::CountIncreased, setup_common}; use alloy::{ primitives::U256, providers::ext::AnvilApi, rpc::types::anvil::{ReorgOptions, TransactionData}, }; -use event_scanner::{EventScannerBuilder, Message, ScannerStatus, assert_empty, assert_next}; +use event_scanner::{EventScannerBuilder, ScannerStatus, assert_empty, assert_next}; #[tokio::test] async fn reorg_rescans_events_within_same_block() -> anyhow::Result<()> { @@ -172,99 +165,51 @@ async fn reorg_depth_two() -> 
anyhow::Result<()> { async fn block_confirmations_mitigate_reorgs() -> anyhow::Result<()> { // any reorg ≤ 5 should be invisible to consumers let block_confirmations = 5; - let LiveScannerSetup { provider, contract, scanner, mut stream, anvil } = - setup_live_scanner(Option::Some(1.0), Option::None, block_confirmations).await?; + let (_anvil, provider, contract, filter) = setup_common(None, None).await?; + let mut scanner = EventScannerBuilder::live() + .block_confirmations(block_confirmations) + .connect(provider.clone()); + let mut stream = scanner.subscribe(filter); scanner.start().await?; - provider.inner().anvil_mine(Some(10), None).await?; - - let num_initial_events = 4_u64; - let num_new_events = 2_u64; - // reorg depth is less than confirmations -> mitigated - let reorg_depth = 2_u64; - let same_block = true; - - let all_tx_hashes = reorg_with_new_count_incr_txs( - &anvil, - contract, - num_initial_events, - num_new_events, - reorg_depth, - same_block, - ) - .await?; - - provider.inner().anvil_mine(Some(10), None).await?; - - let observed_tx_hashes = Arc::new(Mutex::new(Vec::new())); - let observed_tx_hashes_clone = Arc::clone(&observed_tx_hashes); - - // With sufficient confirmations, a shallow reorg should be fully masked - let reorg_detected = Arc::new(Mutex::new(false)); - let reorg_detected_clone = reorg_detected.clone(); - - let event_counting = async move { - while let Some(message) = stream.next().await { - match message { - Message::Data(logs) => { - let mut guard = observed_tx_hashes_clone.lock().await; - for log in logs { - if let Some(n) = log.transaction_hash { - guard.push(n); - } - } - } - Message::Error(e) => { - panic!("panic with error {e}"); - } - Message::Status(info) => { - if matches!(info, ScannerStatus::ReorgDetected) { - *reorg_detected_clone.lock().await = true; - } - } - } - } - }; - - _ = timeout(Duration::from_secs(10), event_counting).await; - - let final_hashes: Vec<_> = observed_tx_hashes.lock().await.clone(); - - // Split tx hashes [initial_before_reorg | post_reorg] - let (initial_before_reorg, post_reorg) = - all_tx_hashes.split_at(num_initial_events.try_into().unwrap()); - - // Keep only the confirmed portion of the pre-reorg events - let kept_initial = &initial_before_reorg - [..initial_before_reorg.len().saturating_sub(reorg_depth.try_into().unwrap())]; - - // Keep all post-reorg events we injected - let kept_post_reorg = &post_reorg[..num_new_events.try_into().unwrap()]; - - // sanity checks - assert_eq!( - final_hashes.len(), - kept_initial.len() + kept_post_reorg.len(), - "expected count = confirmed pre-reorg + all post-reorg events", - ); + // mine some blocks to establish a baseline + provider.clone().inner().anvil_mine(Some(10), None).await?; - assert!(final_hashes.starts_with(kept_initial), "prefix should be confirmed pre-reorg events",); - assert!( - final_hashes.ends_with(kept_post_reorg), - "suffix should be post-reorg events on new chain", - ); + // emit initial events + for _ in 0..4 { + contract.increase().send().await?.watch().await?; + } - // Full equality for completeness - let mut expected = kept_initial.to_owned().clone(); - let mut post_reorg_clone = kept_post_reorg.to_owned().clone(); - expected.append(&mut post_reorg_clone); - assert_eq!(final_hashes, expected); + // mine enough blocks to confirm first 2 events (but not events 3 and 4) + // Events are in blocks 11, 12, 13, 14. After mining 7 blocks, we're at block 21. 
+ // Block 11 needs to reach block 16 for confirmation (5 confirmations) + // Block 12 needs to reach block 17 for confirmation + // So after 7 more blocks, events 1 and 2 are confirmed, but not 3 and 4 + provider.clone().inner().anvil_mine(Some(7), None).await?; - assert!( - !*reorg_detected.lock().await, - "reorg should be fully mitigated by confirmations (no status emitted)", - ); + // assert first 2 events are emitted (confirmed) + assert_next!(stream, &[CountIncreased { newCount: U256::from(1) }]); + assert_next!(stream, &[CountIncreased { newCount: U256::from(2) }]); + let mut stream = assert_empty!(stream); + + // Now do a reorg of depth 2 (removes blocks with events 3 and 4, which weren't confirmed yet) + // Add 2 new events in the reorged chain + let tx_block_pairs = vec![ + (TransactionData::JSON(contract.increase().into_transaction_request()), 0), + (TransactionData::JSON(contract.increase().into_transaction_request()), 0), + ]; + + provider.clone().inner().anvil_reorg(ReorgOptions { depth: 2, tx_block_pairs }).await?; + + // mine enough blocks to confirm the new events + provider.clone().inner().anvil_mine(Some(10), None).await?; + + // assert the new events from the reorged chain + // No ReorgDetected should be emitted because the reorg happened before confirmation + assert_next!(stream, &[CountIncreased { newCount: U256::from(3) }]); + assert_next!(stream, &[CountIncreased { newCount: U256::from(4) }]); + assert_empty!(stream); Ok(()) } diff --git a/tests/sync/from_block.rs b/tests/sync/from_block.rs index 12358786..00aa1c0e 100644 --- a/tests/sync/from_block.rs +++ b/tests/sync/from_block.rs @@ -1,13 +1,13 @@ -use std::{sync::Arc, time::Duration}; - -use alloy::{eips::BlockNumberOrTag, primitives::U256, providers::ext::AnvilApi}; -use event_scanner::{Message, ScannerStatus, assert_next}; -use tokio::{sync::Mutex, time::timeout}; -use tokio_stream::{StreamExt, wrappers::ReceiverStream}; - -use crate::common::{ - TestCounter, TestCounterExt, reorg_with_new_count_incr_txs, setup_sync_scanner, +use alloy::{ + eips::BlockNumberOrTag, + primitives::U256, + providers::ext::AnvilApi, + rpc::types::anvil::{ReorgOptions, TransactionData}, }; +use event_scanner::{ScannerStatus, assert_empty, assert_next}; +use tokio_stream::wrappers::ReceiverStream; + +use crate::common::{TestCounter, TestCounterExt, setup_sync_scanner}; #[tokio::test] async fn replays_historical_then_switches_to_live() -> anyhow::Result<()> { @@ -84,96 +84,45 @@ async fn block_confirmations_mitigate_reorgs_historic_to_live() -> anyhow::Resul let provider = setup.provider.clone(); let contract = setup.contract.clone(); - provider.inner().anvil_mine(Some(10), None).await?; + // mine some blocks to establish a baseline + provider.clone().inner().anvil_mine(Some(10), None).await?; let scanner = setup.scanner; let mut stream = setup.stream; scanner.start().await?; - // Perform a shallow reorg on the live tail - let num_initial_events = 4u64; - let num_new_events = 2u64; - let reorg_depth = 2u64; - let same_block = false; - - let all_tx_hashes = reorg_with_new_count_incr_txs( - &setup.anvil, - contract.clone(), - num_initial_events, - num_new_events, - reorg_depth, - same_block, - ) - .await?; - - provider.inner().anvil_mine(Some(10), None).await?; - - let observed_tx_hashes = Arc::new(Mutex::new(Vec::new())); - let observed_tx_hashes_clone = Arc::clone(&observed_tx_hashes); - - let reorg_detected = Arc::new(Mutex::new(false)); - let reorg_detected_clone = reorg_detected.clone(); - - let event_counting = async move { - 
while let Some(message) = stream.next().await { - match message { - Message::Data(logs) => { - let mut guard = observed_tx_hashes_clone.lock().await; - for log in logs { - if let Some(n) = log.transaction_hash { - guard.push(n); - } - } - } - Message::Error(e) => panic!("panic with error {e}"), - Message::Status(info) => { - if matches!(info, ScannerStatus::ReorgDetected) { - *reorg_detected_clone.lock().await = true; - } - } - } - } - }; - - _ = timeout(Duration::from_secs(10), event_counting).await; - - let final_hashes: Vec<_> = observed_tx_hashes.lock().await.clone(); - - // Split tx hashes [initial_before_reorg | post_reorg] - let (initial_before_reorg, post_reorg) = - all_tx_hashes.split_at(num_initial_events.try_into().unwrap()); - - // Keep only the confirmed portion of the pre-reorg events - let kept_initial = &initial_before_reorg - [..initial_before_reorg.len().saturating_sub(reorg_depth.try_into().unwrap())]; - - // Keep all post-reorg events we injected - let kept_post_reorg = &post_reorg[..num_new_events.try_into().unwrap()]; - - // sanity checks - assert_eq!( - final_hashes.len(), - kept_initial.len() + kept_post_reorg.len(), - "expected count = confirmed pre-reorg + all post-reorg events", - ); - - assert!(final_hashes.starts_with(kept_initial), "prefix should be confirmed pre-reorg events",); - assert!( - final_hashes.ends_with(kept_post_reorg), - "suffix should be post-reorg events on new chain", - ); + // emit initial events + for _ in 0..4 { + contract.increase().send().await?.watch().await?; + } - // Full equality for completeness - let mut expected = kept_initial.to_owned().clone(); - let mut post_reorg_clone = kept_post_reorg.to_owned().clone(); - expected.append(&mut post_reorg_clone); - assert_eq!(final_hashes, expected); + // mine enough blocks to confirm first 2 events (but not events 3 and 4) + provider.clone().inner().anvil_mine(Some(7), None).await?; - assert!( - !*reorg_detected.lock().await, - "reorg should be fully mitigated by confirmations (no status emitted)", - ); + // assert first 2 events are emitted (confirmed) + assert_next!(stream, ScannerStatus::SwitchingToLive); + assert_next!(stream, &[TestCounter::CountIncreased { newCount: U256::from(1) }]); + assert_next!(stream, &[TestCounter::CountIncreased { newCount: U256::from(2) }]); + let mut stream = assert_empty!(stream); + + // Now do a reorg of depth 2 (removes blocks with events 3 and 4, which weren't confirmed yet) + // Add 2 new events in the reorged chain in separate blocks + let tx_block_pairs = vec![ + (TransactionData::JSON(contract.increase().into_transaction_request()), 0), + (TransactionData::JSON(contract.increase().into_transaction_request()), 1), + ]; + + provider.clone().inner().anvil_reorg(ReorgOptions { depth: 2, tx_block_pairs }).await?; + + // mine enough blocks to confirm the new events + provider.clone().inner().anvil_mine(Some(10), None).await?; + + // assert the new events from the reorged chain + // No ReorgDetected should be emitted because the reorg happened before confirmation + assert_next!(stream, &[TestCounter::CountIncreased { newCount: U256::from(3) }]); + assert_next!(stream, &[TestCounter::CountIncreased { newCount: U256::from(4) }]); + assert_empty!(stream); Ok(()) } From 0587bbef66005770792faf0784321f15dfb36365 Mon Sep 17 00:00:00 2001 From: Leo Date: Mon, 3 Nov 2025 21:40:37 +0900 Subject: [PATCH 089/122] ref: remove complex reorg function --- tests/common/mod.rs | 75 +---------------------------------- tests/common/setup_scanner.rs | 1 + 2 files changed, 2 
insertions(+), 74 deletions(-) diff --git a/tests/common/mod.rs b/tests/common/mod.rs index 79bcc282..2bba4f62 100644 --- a/tests/common/mod.rs +++ b/tests/common/mod.rs @@ -11,86 +11,13 @@ pub(crate) use setup_scanner::{ }; pub(crate) use test_counter::{TestCounter, TestCounterExt, deploy_counter}; -use std::sync::Arc; - use alloy::{ - eips::BlockNumberOrTag, network::Ethereum, - primitives::FixedBytes, - providers::{Provider, ProviderBuilder, ext::AnvilApi}, - rpc::types::anvil::{ReorgOptions, TransactionData}, + providers::{Provider, ProviderBuilder}, }; use alloy_node_bindings::{Anvil, AnvilInstance}; use event_scanner::robust_provider::RobustProvider; -pub async fn reorg_with_new_count_incr_txs
<P>
( - anvil: &AnvilInstance, - contract: TestCounter::TestCounterInstance>, - num_initial_events: u64, - num_new_events: u64, - reorg_depth: u64, - same_block: bool, -) -> anyhow::Result>> -where - P: Provider + Clone, -{ - let wallet = anvil.wallet().expect("anvil should return a default wallet"); - let provider = ProviderBuilder::new().wallet(wallet).connect(anvil.endpoint().as_str()).await?; - let mut event_tx_hashes = vec![]; - - for _ in 0..num_initial_events { - let receipt = contract.increase().send().await.unwrap().get_receipt().await.unwrap(); - event_tx_hashes.push(receipt.transaction_hash); - } - - let mut tx_block_pairs = vec![]; - for i in 0..num_new_events { - let tx = contract.increase().into_transaction_request(); - tx_block_pairs.push((TransactionData::JSON(tx), if same_block { 0 } else { i })); - } - - let pre_reorg_block = provider.get_block_by_number(BlockNumberOrTag::Latest).await?.unwrap(); - - provider.anvil_reorg(ReorgOptions { depth: reorg_depth, tx_block_pairs }).await.unwrap(); - - let post_reorg_block = provider - .get_block_by_number(BlockNumberOrTag::Number(pre_reorg_block.number())) - .full() - .await? - .unwrap(); - - assert_eq!(post_reorg_block.header.number, pre_reorg_block.header.number); - assert_ne!(post_reorg_block.header.hash, pre_reorg_block.header.hash); - - if same_block { - let new_block = provider - .get_block_by_number(BlockNumberOrTag::Number( - post_reorg_block.header.number - reorg_depth + 1, - )) - .await? - .unwrap(); - assert_eq!(new_block.transactions.len() as u64, num_new_events); - for tx_hash in new_block.transactions.hashes() { - event_tx_hashes.push(tx_hash); - } - } else { - for i in 0..num_new_events { - let new_block = provider - .get_block_by_number(BlockNumberOrTag::Number( - post_reorg_block.header.number - reorg_depth + 1 + i, - )) - .await? 
- .unwrap(); - assert_eq!(new_block.transactions.len() as u64, 1); - for tx_hash in new_block.transactions.hashes() { - event_tx_hashes.push(tx_hash); - } - } - } - - Ok(event_tx_hashes) -} - pub fn spawn_anvil(block_time: Option) -> anyhow::Result { let mut anvil = Anvil::new(); if let Some(block_time) = block_time { diff --git a/tests/common/setup_scanner.rs b/tests/common/setup_scanner.rs index 35e5c77e..c874e21b 100644 --- a/tests/common/setup_scanner.rs +++ b/tests/common/setup_scanner.rs @@ -27,6 +27,7 @@ where pub contract: TestCounter::TestCounterInstance>, pub scanner: S, pub stream: ReceiverStream, + #[allow(dead_code)] pub anvil: AnvilInstance, } From c1713884cd2576821d9bb1017cf162d3c6179cf2 Mon Sep 17 00:00:00 2001 From: Leo Date: Mon, 3 Nov 2025 21:54:16 +0900 Subject: [PATCH 090/122] fix: doctest + readme --- README.md | 37 +++++++++++-------- src/block_range_scanner.rs | 9 ++--- src/event_scanner/scanner/mod.rs | 52 +++++++++++++-------------- src/event_scanner/scanner/sync/mod.rs | 30 ++++++++-------- 4 files changed, 68 insertions(+), 60 deletions(-) diff --git a/README.md b/README.md index 2680f478..3ba811c2 100644 --- a/README.md +++ b/README.md @@ -58,20 +58,24 @@ event-scanner = "0.4.0-alpha" Create an event stream for the given event filters registered with the `EventScanner`: ```rust -use alloy::{network::Ethereum, sol_types::SolEvent}; -use event_scanner::{EventFilter, EventScannerBuilder, Message}; +use alloy::{network::Ethereum, providers::{Provider, ProviderBuilder}, sol_types::SolEvent}; +use event_scanner::{EventFilter, EventScannerBuilder, Message, robust_provider::RobustProvider}; use tokio_stream::StreamExt; use crate::MyContract; async fn run_scanner( - ws_url: alloy::transports::http::reqwest::Url, + ws_url: &str, contract: alloy::primitives::Address, ) -> Result<(), Box> { + // Connect to provider + let provider = ProviderBuilder::new().connect(ws_url).await?; + let robust_provider = RobustProvider::new(provider.root().to_owned()); + // Configure scanner with custom batch size (optional) let mut scanner = EventScannerBuilder::live() .max_block_range(500) // Process up to 500 blocks per batch - .connect_ws::(ws_url).await?; + .connect(robust_provider); // Register an event listener let filter = EventFilter::new() @@ -108,48 +112,53 @@ async fn run_scanner( ### Building a Scanner -`EventScannerBuilder` provides mode-specific constructors and a functions to configure settings before connecting. -Once configured, connect using one of: +`EventScannerBuilder` provides mode-specific constructors and functions to configure settings before connecting. +Once configured, connect using: -- `connect_ws::(ws_url)` -- `connect_ipc::(path)` -- `connect::(robust_provider)` +- `connect(robust_provider)` - Connect using a `RobustProvider` wrapping your alloy provider This will connect the `EventScanner` and allow you to create event streams and start scanning in various [modes](#scanning-modes). 
```rust +use alloy::providers::{Provider, ProviderBuilder}; +use event_scanner::robust_provider::RobustProvider; + +// Connect to provider (example with WebSocket) +let provider = ProviderBuilder::new().connect("ws://localhost:8545").await?; +let robust_provider = RobustProvider::new(provider.root().to_owned()); + // Live streaming mode let scanner = EventScannerBuilder::live() .max_block_range(500) // Optional: set max blocks per read (default: 1000) .block_confirmations(12) // Optional: set block confirmations (default: 12) - .connect_ws::(ws_url).await?; + .connect(robust_provider.clone()); // Historical block range mode let scanner = EventScannerBuilder::historic() .from_block(1_000_000) .to_block(2_000_000) .max_block_range(500) - .connect_ws::(ws_url).await?; + .connect(robust_provider.clone()); // Latest events mode let scanner = EventScannerBuilder::latest(100) // .from_block(1_000_000) // Optional: set start of search range // .to_block(2_000_000) // Optional: set end of search range .max_block_range(500) - .connect_ws::(ws_url).await?; + .connect(robust_provider.clone()); // Sync from block then switch to live mode let scanner = EventScannerBuilder::sync() .from_block(100) .max_block_range(500) .block_confirmations(12) - .connect_ws::(ws_url).await?; + .connect(robust_provider.clone()); // Sync the latest 60 events then switch to live mode let scanner = EventScannerBuilder::sync() .from_latest(60) .block_confirmations(12) - .connect_ws::(ws_url).await?; + .connect(robust_provider); ``` Invoking `scanner.start()` starts the scanner in the specified mode. diff --git a/src/block_range_scanner.rs b/src/block_range_scanner.rs index b95c6b04..96c6d081 100644 --- a/src/block_range_scanner.rs +++ b/src/block_range_scanner.rs @@ -5,13 +5,14 @@ //! use std::ops::RangeInclusive; //! use tokio_stream::{StreamExt, wrappers::ReceiverStream}; //! -//! use alloy::transports::http::reqwest::Url; +//! use alloy::providers::{Provider, ProviderBuilder}; //! use event_scanner::{ //! ScannerError, //! block_range_scanner::{ //! BlockRangeScanner, BlockRangeScannerClient, DEFAULT_BLOCK_CONFIRMATIONS, //! DEFAULT_MAX_BLOCK_RANGE, Message, //! }, +//! robust_provider::RobustProvider, //! }; //! use tokio::time::Duration; //! use tracing::{error, info}; @@ -22,9 +23,9 @@ //! tracing_subscriber::fmt::init(); //! //! // Configuration -//! let block_range_scanner = BlockRangeScanner::new() -//! .connect_ws::(Url::parse("ws://localhost:8546").unwrap()) -//! .await?; +//! let provider = ProviderBuilder::new().connect("ws://localhost:8546").await?; +//! let robust_provider = RobustProvider::new(provider.root().to_owned()); +//! let block_range_scanner = BlockRangeScanner::new().connect(robust_provider); //! //! // Create client to send subscribe command to block scanner //! 
let client: BlockRangeScannerClient = block_range_scanner.run()?; diff --git a/src/event_scanner/scanner/mod.rs b/src/event_scanner/scanner/mod.rs index 237a0910..a54a4abd 100644 --- a/src/event_scanner/scanner/mod.rs +++ b/src/event_scanner/scanner/mod.rs @@ -77,17 +77,16 @@ impl EventScannerBuilder { /// # Example /// /// ```no_run - /// # use alloy::network::Ethereum; - /// # use event_scanner::{EventFilter, EventScannerBuilder, Message}; + /// # use alloy::{network::Ethereum, providers::{Provider, ProviderBuilder}}; + /// # use event_scanner::{EventFilter, EventScannerBuilder, Message, robust_provider::RobustProvider}; /// # use tokio_stream::StreamExt; /// # /// # async fn example() -> Result<(), Box> { - /// # let ws_url = "ws://localhost:8545".parse()?; /// # let contract_address = alloy::primitives::address!("0xd8dA6BF26964af9d7eed9e03e53415d37aa96045"); /// // Stream all events from genesis to latest block - /// let mut scanner = EventScannerBuilder::historic() - /// .connect_ws::(ws_url) - /// .await?; + /// let provider = ProviderBuilder::new().connect("ws://localhost:8545").await?; + /// let robust_provider = RobustProvider::new(provider.root().to_owned()); + /// let mut scanner = EventScannerBuilder::historic().connect(robust_provider); /// /// let filter = EventFilter::new().contract_address(contract_address); /// let mut stream = scanner.subscribe(filter); @@ -104,17 +103,17 @@ impl EventScannerBuilder { /// Specifying a custom block range: /// /// ```no_run - /// # use alloy::network::Ethereum; - /// # use event_scanner::EventScannerBuilder; + /// # use alloy::{network::Ethereum, providers::{Provider, ProviderBuilder}}; + /// # use event_scanner::{EventScannerBuilder, robust_provider::RobustProvider}; /// # /// # async fn example() -> Result<(), Box> { - /// # let ws_url = "ws://localhost:8545".parse()?; /// // Stream events between blocks [1_000_000, 2_000_000] + /// let provider = ProviderBuilder::new().connect("ws://localhost:8545").await?; + /// let robust_provider = RobustProvider::new(provider.root().to_owned()); /// let mut scanner = EventScannerBuilder::historic() /// .from_block(1_000_000) /// .to_block(2_000_000) - /// .connect_ws::(ws_url) - /// .await?; + /// .connect(robust_provider); /// # Ok(()) /// # } /// ``` @@ -143,18 +142,18 @@ impl EventScannerBuilder { /// # Example /// /// ```no_run - /// # use alloy::network::Ethereum; - /// # use event_scanner::{EventFilter, EventScannerBuilder, Message}; + /// # use alloy::{network::Ethereum, providers::{Provider, ProviderBuilder}}; + /// # use event_scanner::{EventFilter, EventScannerBuilder, Message, robust_provider::RobustProvider}; /// # use tokio_stream::StreamExt; /// # /// # async fn example() -> Result<(), Box> { - /// # let ws_url = "ws://localhost:8545".parse()?; /// # let contract_address = alloy::primitives::address!("0xd8dA6BF26964af9d7eed9e03e53415d37aa96045"); /// // Stream new events as they arrive + /// let provider = ProviderBuilder::new().connect("ws://localhost:8545").await?; + /// let robust_provider = RobustProvider::new(provider.root().to_owned()); /// let mut scanner = EventScannerBuilder::live() /// .block_confirmations(20) - /// .connect_ws::(ws_url) - /// .await?; + /// .connect(robust_provider); /// /// let filter = EventFilter::new().contract_address(contract_address); /// let mut stream = scanner.subscribe(filter); @@ -229,17 +228,16 @@ impl EventScannerBuilder { /// # Example /// /// ```no_run - /// # use alloy::{network::Ethereum, primitives::Address}; - /// # use 
event_scanner::{EventFilter, EventScannerBuilder, Message}; + /// # use alloy::{network::Ethereum, primitives::Address, providers::{Provider, ProviderBuilder}}; + /// # use event_scanner::{EventFilter, EventScannerBuilder, Message, robust_provider::RobustProvider}; /// # use tokio_stream::StreamExt; /// # /// # async fn example() -> Result<(), Box> { - /// # let ws_url = "ws://localhost:8545".parse()?; /// # let contract_address = alloy::primitives::address!("0xd8dA6BF26964af9d7eed9e03e53415d37aa96045"); /// // Collect the latest 10 events across Earliest..=Latest - /// let mut scanner = EventScannerBuilder::latest(10) - /// .connect_ws::(ws_url) - /// .await?; + /// let provider = ProviderBuilder::new().connect("ws://localhost:8545").await?; + /// let robust_provider = RobustProvider::new(provider.root().to_owned()); + /// let mut scanner = EventScannerBuilder::latest(10).connect(robust_provider); /// /// let filter = EventFilter::new().contract_address(contract_address); /// let mut stream = scanner.subscribe(filter); @@ -257,17 +255,17 @@ impl EventScannerBuilder { /// Restricting to a specific block range: /// /// ```no_run - /// # use alloy::network::Ethereum; - /// # use event_scanner::EventScannerBuilder; + /// # use alloy::{network::Ethereum, providers::{Provider, ProviderBuilder}}; + /// # use event_scanner::{EventScannerBuilder, robust_provider::RobustProvider}; /// # /// # async fn example() -> Result<(), Box> { - /// # let ws_url = "ws://localhost:8545".parse()?; /// // Collect the latest 5 events between blocks [1_000_000, 1_100_000] + /// let provider = ProviderBuilder::new().connect("ws://localhost:8545").await?; + /// let robust_provider = RobustProvider::new(provider.root().to_owned()); /// let mut scanner = EventScannerBuilder::latest(5) /// .from_block(1_000_000) /// .to_block(1_100_000) - /// .connect_ws::(ws_url) - /// .await?; + /// .connect(robust_provider); /// # Ok(()) /// # } /// ``` diff --git a/src/event_scanner/scanner/sync/mod.rs b/src/event_scanner/scanner/sync/mod.rs index 6701752e..116fe00d 100644 --- a/src/event_scanner/scanner/sync/mod.rs +++ b/src/event_scanner/scanner/sync/mod.rs @@ -24,18 +24,18 @@ impl EventScannerBuilder { /// # Example /// /// ```no_run - /// # use alloy::network::Ethereum; - /// # use event_scanner::{EventFilter, EventScannerBuilder, Message}; + /// # use alloy::{network::Ethereum, providers::{Provider, ProviderBuilder}}; + /// # use event_scanner::{EventFilter, EventScannerBuilder, Message, robust_provider::RobustProvider}; /// # use tokio_stream::StreamExt; /// # /// # async fn example() -> Result<(), Box> { - /// # let ws_url = "ws://localhost:8545".parse()?; /// # let contract_address = alloy::primitives::address!("0xd8dA6BF26964af9d7eed9e03e53415d37aa96045"); /// // Fetch the latest 10 events, then stream new events continuously + /// let provider = ProviderBuilder::new().connect("ws://localhost:8545").await?; + /// let robust_provider = RobustProvider::new(provider.root().to_owned()); /// let mut scanner = EventScannerBuilder::sync() /// .from_latest(10) - /// .connect_ws::(ws_url) - /// .await?; + /// .connect(robust_provider); /// /// let filter = EventFilter::new().contract_address(contract_address); /// let mut stream = scanner.subscribe(filter); @@ -120,18 +120,18 @@ impl EventScannerBuilder { /// # Example /// /// ```no_run - /// # use alloy::network::Ethereum; - /// # use event_scanner::{EventFilter, EventScannerBuilder, Message}; + /// # use alloy::{network::Ethereum, providers::{Provider, ProviderBuilder}}; + /// # 
use event_scanner::{EventFilter, EventScannerBuilder, Message, robust_provider::RobustProvider}; /// # use tokio_stream::StreamExt; /// # /// # async fn example() -> Result<(), Box> { - /// # let ws_url = "ws://localhost:8545".parse()?; /// # let contract_address = alloy::primitives::address!("0xd8dA6BF26964af9d7eed9e03e53415d37aa96045"); /// // Sync from block 1_000_000 to present, then stream new events + /// let provider = ProviderBuilder::new().connect("ws://localhost:8545").await?; + /// let robust_provider = RobustProvider::new(provider.root().to_owned()); /// let mut scanner = EventScannerBuilder::sync() /// .from_block(1_000_000) - /// .connect_ws::(ws_url) - /// .await?; + /// .connect(robust_provider); /// /// let filter = EventFilter::new().contract_address(contract_address); /// let mut stream = scanner.subscribe(filter); @@ -159,16 +159,16 @@ impl EventScannerBuilder { /// Using block tags: /// /// ```no_run - /// # use alloy::{network::Ethereum, eips::BlockNumberOrTag}; - /// # use event_scanner::EventScannerBuilder; + /// # use alloy::{network::Ethereum, eips::BlockNumberOrTag, providers::{Provider, ProviderBuilder}}; + /// # use event_scanner::{EventScannerBuilder, robust_provider::RobustProvider}; /// # /// # async fn example() -> Result<(), Box> { - /// # let ws_url = "ws://localhost:8545".parse()?; /// // Sync from genesis block + /// let provider = ProviderBuilder::new().connect("ws://localhost:8545").await?; + /// let robust_provider = RobustProvider::new(provider.root().to_owned()); /// let mut scanner = EventScannerBuilder::sync() /// .from_block(BlockNumberOrTag::Earliest) - /// .connect_ws::(ws_url) - /// .await?; + /// .connect(robust_provider); /// # Ok(()) /// # } /// ``` From e4e46187d3aa38e2030c2658d26d7bec9fbee7ba Mon Sep 17 00:00:00 2001 From: Leo Date: Tue, 4 Nov 2025 20:42:25 +0900 Subject: [PATCH 091/122] fix: merge conflicts --- tests/common/mod.rs | 4 ++-- tests/common/setup_scanner.rs | 8 +++----- tests/common/test_counter.rs | 6 ++---- tests/latest_events/basic.rs | 4 ++-- tests/live/basic.rs | 2 +- tests/live/reorg.rs | 19 ++++++++++++------- 6 files changed, 22 insertions(+), 21 deletions(-) diff --git a/tests/common/mod.rs b/tests/common/mod.rs index 2bba4f62..1ce4d2fd 100644 --- a/tests/common/mod.rs +++ b/tests/common/mod.rs @@ -6,8 +6,8 @@ pub mod setup_scanner; pub mod test_counter; pub(crate) use setup_scanner::{ - setup_common, setup_historic_scanner, setup_latest_scanner, setup_live_scanner, - setup_sync_from_latest_scanner, setup_sync_scanner, + LiveScannerSetup, setup_common, setup_historic_scanner, setup_latest_scanner, + setup_live_scanner, setup_sync_from_latest_scanner, setup_sync_scanner, }; pub(crate) use test_counter::{TestCounter, TestCounterExt, deploy_counter}; diff --git a/tests/common/setup_scanner.rs b/tests/common/setup_scanner.rs index c874e21b..f770f36f 100644 --- a/tests/common/setup_scanner.rs +++ b/tests/common/setup_scanner.rs @@ -1,5 +1,3 @@ -use std::sync::Arc; - use alloy::{ eips::BlockNumberOrTag, network::Ethereum, @@ -24,7 +22,7 @@ where P: Provider + Clone, { pub provider: RobustProvider, - pub contract: TestCounter::TestCounterInstance>, + pub contract: TestCounter::TestCounterInstance
<P>,
     pub scanner: S,
     pub stream: ReceiverStream,
     #[allow(dead_code)]
     pub anvil: AnvilInstance,
@@ -43,12 +41,12 @@ pub async fn setup_common(
 ) -> anyhow::Result<(
     AnvilInstance,
     RobustProvider,
-    TestCounter::TestCounterInstance>,
+    TestCounter::TestCounterInstance,
     EventFilter,
 )> {
     let anvil = spawn_anvil(block_interval)?;
     let provider = build_provider(&anvil).await?;
-    let contract = deploy_counter(Arc::new(provider.inner().clone())).await?;
+    let contract = deploy_counter(provider.inner().clone()).await?;

     let default_filter =
         EventFilter::new().contract_address(*contract.address()).event(CountIncreased::SIGNATURE);
diff --git a/tests/common/test_counter.rs b/tests/common/test_counter.rs
index 155ce9a3..217338d0 100644
--- a/tests/common/test_counter.rs
+++ b/tests/common/test_counter.rs
@@ -1,5 +1,3 @@
-use std::sync::Arc;
-
 use alloy::{network::Ethereum, primitives::U256, providers::Provider, sol};

 use event_scanner::test_utils::LogMetadata;
@@ -34,7 +32,7 @@ sol! {
 pub async fn deploy_counter<P>(provider: P) -> anyhow::Result<TestCounter::TestCounterInstance<P>>
 where
-    P: alloy::providers::Provider + Clone,
+    P: alloy::providers::Provider,
 {
     let contract = TestCounter::deploy(provider).await?;
     Ok(contract)
 }
@@ -50,7 +48,7 @@ pub(crate) trait TestCounterExt {
     ) -> anyhow::Result>;
 }

-impl TestCounterExt for TestCounter::TestCounterInstance> {
+impl<P: Provider> TestCounterExt for TestCounter::TestCounterInstance<P>
{ async fn increase_and_get_meta( &self, ) -> anyhow::Result> { diff --git a/tests/latest_events/basic.rs b/tests/latest_events/basic.rs index c1b13d9e..522c5694 100644 --- a/tests/latest_events/basic.rs +++ b/tests/latest_events/basic.rs @@ -252,8 +252,8 @@ async fn latest_scanner_cross_contract_filtering() -> anyhow::Result<()> { let provider = setup.provider; let mut scanner = setup.scanner; - let contract_a = deploy_counter(provider.clone()).await?; - let contract_b = deploy_counter(provider.clone()).await?; + let contract_a = deploy_counter(provider.inner().clone()).await?; + let contract_b = deploy_counter(provider.inner().clone()).await?; // Listener only for contract A CountIncreased let filter_a = EventFilter::new() diff --git a/tests/live/basic.rs b/tests/live/basic.rs index 7ed53f51..7253238e 100644 --- a/tests/live/basic.rs +++ b/tests/live/basic.rs @@ -65,7 +65,7 @@ async fn multiple_contracts_same_event_isolate_callbacks() -> anyhow::Result<()> let setup = setup_live_scanner(Some(0.1), None, 0).await?; let provider = setup.provider.clone(); let a = setup.contract.clone(); - let b = deploy_counter(provider.clone()).await?; + let b = deploy_counter(provider.inner().clone()).await?; let a_filter = EventFilter::new() .contract_address(*a.address()) diff --git a/tests/live/reorg.rs b/tests/live/reorg.rs index 70da5446..6532b4cf 100644 --- a/tests/live/reorg.rs +++ b/tests/live/reorg.rs @@ -34,7 +34,8 @@ async fn reorg_rescans_events_within_same_block() -> anyhow::Result<()> { (TransactionData::JSON(contract.increase().into_transaction_request()), 0), (TransactionData::JSON(contract.increase().into_transaction_request()), 0), ]; - provider.anvil_reorg(ReorgOptions { depth: 4, tx_block_pairs }).await?; + + provider.inner().anvil_reorg(ReorgOptions { depth: 4, tx_block_pairs }).await?; // assert expected messages post-reorg assert_next!(stream, ScannerStatus::ReorgDetected); @@ -77,7 +78,8 @@ async fn reorg_rescans_events_with_ascending_blocks() -> anyhow::Result<()> { (TransactionData::JSON(contract.increase().into_transaction_request()), 1), (TransactionData::JSON(contract.increase().into_transaction_request()), 2), ]; - provider.anvil_reorg(ReorgOptions { depth: 4, tx_block_pairs }).await?; + + provider.inner().anvil_reorg(ReorgOptions { depth: 4, tx_block_pairs }).await?; // assert expected messages post-reorg assert_next!(stream, ScannerStatus::ReorgDetected); @@ -111,7 +113,8 @@ async fn reorg_depth_one() -> anyhow::Result<()> { // reorg the chain let tx_block_pairs = vec![(TransactionData::JSON(contract.increase().into_transaction_request()), 0)]; - provider.anvil_reorg(ReorgOptions { depth: 1, tx_block_pairs }).await?; + + provider.inner().anvil_reorg(ReorgOptions { depth: 1, tx_block_pairs }).await?; // assert expected messages post-reorg assert_next!(stream, ScannerStatus::ReorgDetected); @@ -143,7 +146,8 @@ async fn reorg_depth_two() -> anyhow::Result<()> { // reorg the chain let tx_block_pairs = vec![(TransactionData::JSON(contract.increase().into_transaction_request()), 0)]; - provider.anvil_reorg(ReorgOptions { depth: 2, tx_block_pairs }).await?; + + provider.inner().anvil_reorg(ReorgOptions { depth: 2, tx_block_pairs }).await?; // assert expected messages post-reorg assert_next!(stream, ScannerStatus::ReorgDetected); @@ -162,7 +166,7 @@ async fn block_confirmations_mitigate_reorgs() -> anyhow::Result<()> { scanner.start().await?; // mine some initial blocks - provider.anvil_mine(Some(10), None).await?; + provider.inner().anvil_mine(Some(10), None).await?; // emit 
initial events for _ in 0..4 { @@ -179,13 +183,14 @@ async fn block_confirmations_mitigate_reorgs() -> anyhow::Result<()> { (TransactionData::JSON(contract.increase().into_transaction_request()), 0), (TransactionData::JSON(contract.increase().into_transaction_request()), 0), ]; - provider.anvil_reorg(ReorgOptions { depth: 2, tx_block_pairs }).await?; + + provider.inner().anvil_reorg(ReorgOptions { depth: 2, tx_block_pairs }).await?; // assert no events have still been streamed let mut stream = assert_empty!(stream); // mine some additional post-reorg blocks - provider.anvil_mine(Some(10), None).await?; + provider.inner().anvil_mine(Some(10), None).await?; // no `ReorgDetected` should be emitted assert_next!(stream, &[CountIncreased { newCount: U256::from(1) }]); From e3266450fe11800efd192f34b88e8179afd71893 Mon Sep 17 00:00:00 2001 From: Leo Date: Tue, 4 Nov 2025 20:50:34 +0900 Subject: [PATCH 092/122] ref: remove to_root() --- README.md | 4 +-- examples/historical_scanning/main.rs | 9 ++---- examples/latest_events_scanning/main.rs | 9 ++---- examples/live_scanning/main.rs | 9 ++---- examples/sync_from_block_scanning/main.rs | 9 ++---- examples/sync_from_latest_scanning/main.rs | 9 ++---- src/block_range_scanner.rs | 2 +- src/event_scanner/scanner/mod.rs | 10 +++---- src/event_scanner/scanner/sync/mod.rs | 6 ++-- src/robust_provider.rs | 10 ++----- tests/block_range_scanner.rs | 34 +++++++++++----------- tests/common/mod.rs | 7 ++--- 12 files changed, 42 insertions(+), 76 deletions(-) diff --git a/README.md b/README.md index a849b784..f3d8a3f6 100644 --- a/README.md +++ b/README.md @@ -71,7 +71,7 @@ async fn run_scanner( ) -> Result<(), Box> { // Connect to provider let provider = ProviderBuilder::new().connect(ws_url).await?; - let robust_provider = RobustProvider::new(provider.root().to_owned()); + let robust_provider = RobustProvider::new(provider); // Configure scanner with custom batch size (optional) let mut scanner = EventScannerBuilder::live() @@ -126,7 +126,7 @@ use event_scanner::robust_provider::RobustProvider; // Connect to provider (example with WebSocket) let provider = ProviderBuilder::new().connect("ws://localhost:8545").await?; -let robust_provider = RobustProvider::new(provider.root().to_owned()); +let robust_provider = RobustProvider::new(provider); // Live streaming mode let scanner = EventScannerBuilder::live() diff --git a/examples/historical_scanning/main.rs b/examples/historical_scanning/main.rs index 98807cc0..84db6c29 100644 --- a/examples/historical_scanning/main.rs +++ b/examples/historical_scanning/main.rs @@ -1,9 +1,4 @@ -use alloy::{ - network::Ethereum, - providers::{Provider, ProviderBuilder}, - sol, - sol_types::SolEvent, -}; +use alloy::{network::Ethereum, providers::ProviderBuilder, sol, sol_types::SolEvent}; use alloy_node_bindings::Anvil; use event_scanner::{EventFilter, EventScannerBuilder, Message, robust_provider::RobustProvider}; @@ -57,7 +52,7 @@ async fn main() -> anyhow::Result<()> { let _ = counter_contract.increase().send().await?.get_receipt().await?; - let robust_provider = RobustProvider::new(provider.root().clone()); + let robust_provider = RobustProvider::new(provider.clone()); let mut scanner = EventScannerBuilder::historic().connect::(robust_provider); let mut stream = scanner.subscribe(increase_filter); diff --git a/examples/latest_events_scanning/main.rs b/examples/latest_events_scanning/main.rs index af1dd927..22744b5f 100644 --- a/examples/latest_events_scanning/main.rs +++ b/examples/latest_events_scanning/main.rs @@ -1,9 +1,4 @@ 
-use alloy::{ - network::Ethereum, - providers::{Provider, ProviderBuilder}, - sol, - sol_types::SolEvent, -}; +use alloy::{network::Ethereum, providers::ProviderBuilder, sol, sol_types::SolEvent}; use alloy_node_bindings::Anvil; use event_scanner::{EventFilter, EventScannerBuilder, Message, robust_provider::RobustProvider}; use tokio_stream::StreamExt; @@ -54,7 +49,7 @@ async fn main() -> anyhow::Result<()> { .contract_address(*contract_address) .event(Counter::CountIncreased::SIGNATURE); - let robust_provider = RobustProvider::new(provider.root().clone()); + let robust_provider = RobustProvider::new(provider.clone()); let mut scanner = EventScannerBuilder::latest(5).connect::(robust_provider); let mut stream = scanner.subscribe(increase_filter); diff --git a/examples/live_scanning/main.rs b/examples/live_scanning/main.rs index 10bf3140..098ce1f8 100644 --- a/examples/live_scanning/main.rs +++ b/examples/live_scanning/main.rs @@ -1,9 +1,4 @@ -use alloy::{ - network::Ethereum, - providers::{Provider, ProviderBuilder}, - sol, - sol_types::SolEvent, -}; +use alloy::{network::Ethereum, providers::ProviderBuilder, sol, sol_types::SolEvent}; use alloy_node_bindings::Anvil; use event_scanner::{EventFilter, EventScannerBuilder, Message, robust_provider::RobustProvider}; @@ -55,7 +50,7 @@ async fn main() -> anyhow::Result<()> { .contract_address(*contract_address) .event(Counter::CountIncreased::SIGNATURE); - let robust_provider = RobustProvider::new(provider.root().clone()); + let robust_provider = RobustProvider::new(provider.clone()); let mut scanner = EventScannerBuilder::live().connect::(robust_provider); let mut stream = scanner.subscribe(increase_filter); diff --git a/examples/sync_from_block_scanning/main.rs b/examples/sync_from_block_scanning/main.rs index bbc957b4..aba1c518 100644 --- a/examples/sync_from_block_scanning/main.rs +++ b/examples/sync_from_block_scanning/main.rs @@ -1,11 +1,6 @@ use std::time::Duration; -use alloy::{ - network::Ethereum, - providers::{Provider, ProviderBuilder}, - sol, - sol_types::SolEvent, -}; +use alloy::{network::Ethereum, providers::ProviderBuilder, sol, sol_types::SolEvent}; use alloy_node_bindings::Anvil; use event_scanner::{EventFilter, EventScannerBuilder, Message, robust_provider::RobustProvider}; use tokio::time::sleep; @@ -63,7 +58,7 @@ async fn main() -> anyhow::Result<()> { info!("Historical event {} created", i + 1); } - let robust_provider = RobustProvider::new(provider.root().clone()); + let robust_provider = RobustProvider::new(provider.clone()); let mut scanner = EventScannerBuilder::sync().from_block(0).connect::(robust_provider); diff --git a/examples/sync_from_latest_scanning/main.rs b/examples/sync_from_latest_scanning/main.rs index 0712b00a..00d9d1da 100644 --- a/examples/sync_from_latest_scanning/main.rs +++ b/examples/sync_from_latest_scanning/main.rs @@ -1,9 +1,4 @@ -use alloy::{ - network::Ethereum, - providers::{Provider, ProviderBuilder}, - sol, - sol_types::SolEvent, -}; +use alloy::{network::Ethereum, providers::ProviderBuilder, sol, sol_types::SolEvent}; use alloy_node_bindings::Anvil; use event_scanner::{EventFilter, EventScannerBuilder, Message, robust_provider::RobustProvider}; @@ -55,7 +50,7 @@ async fn main() -> anyhow::Result<()> { .contract_address(*contract_address) .event(Counter::CountIncreased::SIGNATURE); - let robust_provider = RobustProvider::new(provider.root().clone()); + let robust_provider = RobustProvider::new(provider.clone()); let mut client = 
EventScannerBuilder::sync().from_latest(5).connect::(robust_provider); diff --git a/src/block_range_scanner.rs b/src/block_range_scanner.rs index 96c6d081..b12423b5 100644 --- a/src/block_range_scanner.rs +++ b/src/block_range_scanner.rs @@ -24,7 +24,7 @@ //! //! // Configuration //! let provider = ProviderBuilder::new().connect("ws://localhost:8546").await?; -//! let robust_provider = RobustProvider::new(provider.root().to_owned()); +//! let robust_provider = RobustProvider::new(provider); //! let block_range_scanner = BlockRangeScanner::new().connect(robust_provider); //! //! // Create client to send subscribe command to block scanner diff --git a/src/event_scanner/scanner/mod.rs b/src/event_scanner/scanner/mod.rs index a54a4abd..fc80f03b 100644 --- a/src/event_scanner/scanner/mod.rs +++ b/src/event_scanner/scanner/mod.rs @@ -85,7 +85,7 @@ impl EventScannerBuilder { /// # let contract_address = alloy::primitives::address!("0xd8dA6BF26964af9d7eed9e03e53415d37aa96045"); /// // Stream all events from genesis to latest block /// let provider = ProviderBuilder::new().connect("ws://localhost:8545").await?; - /// let robust_provider = RobustProvider::new(provider.root().to_owned()); + /// let robust_provider = RobustProvider::new(provider); /// let mut scanner = EventScannerBuilder::historic().connect(robust_provider); /// /// let filter = EventFilter::new().contract_address(contract_address); @@ -109,7 +109,7 @@ impl EventScannerBuilder { /// # async fn example() -> Result<(), Box> { /// // Stream events between blocks [1_000_000, 2_000_000] /// let provider = ProviderBuilder::new().connect("ws://localhost:8545").await?; - /// let robust_provider = RobustProvider::new(provider.root().to_owned()); + /// let robust_provider = RobustProvider::new(provider); /// let mut scanner = EventScannerBuilder::historic() /// .from_block(1_000_000) /// .to_block(2_000_000) @@ -150,7 +150,7 @@ impl EventScannerBuilder { /// # let contract_address = alloy::primitives::address!("0xd8dA6BF26964af9d7eed9e03e53415d37aa96045"); /// // Stream new events as they arrive /// let provider = ProviderBuilder::new().connect("ws://localhost:8545").await?; - /// let robust_provider = RobustProvider::new(provider.root().to_owned()); + /// let robust_provider = RobustProvider::new(provider); /// let mut scanner = EventScannerBuilder::live() /// .block_confirmations(20) /// .connect(robust_provider); @@ -236,7 +236,7 @@ impl EventScannerBuilder { /// # let contract_address = alloy::primitives::address!("0xd8dA6BF26964af9d7eed9e03e53415d37aa96045"); /// // Collect the latest 10 events across Earliest..=Latest /// let provider = ProviderBuilder::new().connect("ws://localhost:8545").await?; - /// let robust_provider = RobustProvider::new(provider.root().to_owned()); + /// let robust_provider = RobustProvider::new(provider); /// let mut scanner = EventScannerBuilder::latest(10).connect(robust_provider); /// /// let filter = EventFilter::new().contract_address(contract_address); @@ -261,7 +261,7 @@ impl EventScannerBuilder { /// # async fn example() -> Result<(), Box> { /// // Collect the latest 5 events between blocks [1_000_000, 1_100_000] /// let provider = ProviderBuilder::new().connect("ws://localhost:8545").await?; - /// let robust_provider = RobustProvider::new(provider.root().to_owned()); + /// let robust_provider = RobustProvider::new(provider); /// let mut scanner = EventScannerBuilder::latest(5) /// .from_block(1_000_000) /// .to_block(1_100_000) diff --git a/src/event_scanner/scanner/sync/mod.rs 
b/src/event_scanner/scanner/sync/mod.rs index 116fe00d..0bc60802 100644 --- a/src/event_scanner/scanner/sync/mod.rs +++ b/src/event_scanner/scanner/sync/mod.rs @@ -32,7 +32,7 @@ impl EventScannerBuilder { /// # let contract_address = alloy::primitives::address!("0xd8dA6BF26964af9d7eed9e03e53415d37aa96045"); /// // Fetch the latest 10 events, then stream new events continuously /// let provider = ProviderBuilder::new().connect("ws://localhost:8545").await?; - /// let robust_provider = RobustProvider::new(provider.root().to_owned()); + /// let robust_provider = RobustProvider::new(provider); /// let mut scanner = EventScannerBuilder::sync() /// .from_latest(10) /// .connect(robust_provider); @@ -128,7 +128,7 @@ impl EventScannerBuilder { /// # let contract_address = alloy::primitives::address!("0xd8dA6BF26964af9d7eed9e03e53415d37aa96045"); /// // Sync from block 1_000_000 to present, then stream new events /// let provider = ProviderBuilder::new().connect("ws://localhost:8545").await?; - /// let robust_provider = RobustProvider::new(provider.root().to_owned()); + /// let robust_provider = RobustProvider::new(provider); /// let mut scanner = EventScannerBuilder::sync() /// .from_block(1_000_000) /// .connect(robust_provider); @@ -165,7 +165,7 @@ impl EventScannerBuilder { /// # async fn example() -> Result<(), Box> { /// // Sync from genesis block /// let provider = ProviderBuilder::new().connect("ws://localhost:8545").await?; - /// let robust_provider = RobustProvider::new(provider.root().to_owned()); + /// let robust_provider = RobustProvider::new(provider); /// let mut scanner = EventScannerBuilder::sync() /// .from_block(BlockNumberOrTag::Earliest) /// .connect(robust_provider); diff --git a/src/robust_provider.rs b/src/robust_provider.rs index 154c3016..2b44c339 100644 --- a/src/robust_provider.rs +++ b/src/robust_provider.rs @@ -50,18 +50,12 @@ pub const DEFAULT_MAX_RETRIES: usize = 5; /// Default base delay between retries. pub const DEFAULT_RETRY_INTERVAL: Duration = Duration::from_secs(1); -impl From> for RobustProvider { - fn from(provider: RootProvider) -> Self { - Self::new(provider) - } -} - impl RobustProvider { /// Create a new `RobustProvider` with default settings. 
#[must_use] - pub fn new(provider: RootProvider) -> Self { + pub fn new(provider: impl Provider) -> Self { Self { - provider, + provider: provider.root().to_owned(), max_timeout: DEFAULT_MAX_TIMEOUT, max_retries: DEFAULT_MAX_RETRIES, retry_interval: DEFAULT_RETRY_INTERVAL, diff --git a/tests/block_range_scanner.rs b/tests/block_range_scanner.rs index bc461377..573131f0 100644 --- a/tests/block_range_scanner.rs +++ b/tests/block_range_scanner.rs @@ -1,6 +1,6 @@ use alloy::{ eips::{BlockId, BlockNumberOrTag}, - providers::{Provider, ProviderBuilder, ext::AnvilApi}, + providers::{ProviderBuilder, ext::AnvilApi}, rpc::types::anvil::ReorgOptions, }; use alloy_node_bindings::Anvil; @@ -15,7 +15,7 @@ use tokio_stream::StreamExt; async fn live_mode_processes_all_blocks_respecting_block_confirmations() -> anyhow::Result<()> { let anvil = Anvil::new().try_spawn()?; let provider = ProviderBuilder::new().connect(anvil.ws_endpoint().as_str()).await?; - let robust_provider = RobustProvider::new(provider.root().to_owned()); + let robust_provider = RobustProvider::new(provider); // --- Zero block confirmations -> stream immediately --- @@ -67,7 +67,7 @@ async fn stream_from_latest_starts_at_tip_not_confirmed() -> anyhow::Result<()> let block_confirmations = 5; - let robust_provider = RobustProvider::new(provider.root().to_owned()); + let robust_provider = RobustProvider::new(provider); let client = BlockRangeScanner::new().connect(robust_provider.clone()).run()?; @@ -101,7 +101,7 @@ async fn continuous_blocks_if_reorg_less_than_block_confirmation() -> anyhow::Re let block_confirmations = 5; - let robust_provider = RobustProvider::new(provider.root().to_owned()); + let robust_provider = RobustProvider::new(provider.clone()); let client = BlockRangeScanner::new().connect(robust_provider).run()?; @@ -144,7 +144,7 @@ async fn shallow_block_confirmation_does_not_mitigate_reorg() -> anyhow::Result< let block_confirmations = 3; - let robust_provider = RobustProvider::new(provider.root().to_owned()); + let robust_provider = RobustProvider::new(provider.clone()); let client = BlockRangeScanner::new().connect(robust_provider).run()?; @@ -210,7 +210,7 @@ async fn historical_emits_correction_range_when_reorg_below_end() -> anyhow::Res let end_num = 110; - let robust_provider = RobustProvider::new(provider.root().to_owned()); + let robust_provider = RobustProvider::new(provider.clone()); let client = BlockRangeScanner::new().max_block_range(30).connect(robust_provider).run()?; @@ -243,7 +243,7 @@ async fn historical_emits_correction_range_when_end_num_reorgs() -> anyhow::Resu let end_num = 120; - let robust_provider = RobustProvider::new(provider.root().to_owned()); + let robust_provider = RobustProvider::new(provider.clone()); let client = BlockRangeScanner::new().max_block_range(30).connect(robust_provider).run()?; let mut stream = client @@ -275,7 +275,7 @@ async fn historic_mode_respects_blocks_read_per_epoch() -> anyhow::Result<()> { provider.anvil_mine(Some(100), None).await?; - let robust_provider = RobustProvider::new(provider.root().to_owned()); + let robust_provider = RobustProvider::new(provider); let client = BlockRangeScanner::new().max_block_range(5).connect(robust_provider.clone()).run()?; @@ -324,7 +324,7 @@ async fn historic_mode_normalises_start_and_end_block() -> anyhow::Result<()> { let provider = ProviderBuilder::new().connect(anvil.endpoint().as_str()).await?; provider.anvil_mine(Some(11), None).await?; - let robust_provider = RobustProvider::new(provider.root().to_owned()); + let robust_provider 
= RobustProvider::new(provider); let client = BlockRangeScanner::new().max_block_range(5).connect(robust_provider).run()?; let mut stream = client.stream_historical(10, 0).await?; @@ -344,7 +344,7 @@ async fn rewind_single_batch_when_epoch_larger_than_range() -> anyhow::Result<() provider.anvil_mine(Some(150), None).await?; - let robust_provider = RobustProvider::new(provider.root().to_owned()); + let robust_provider = RobustProvider::new(provider); let client = BlockRangeScanner::new().max_block_range(100).connect(robust_provider).run()?; let mut stream = client.rewind(100, 150).await?; @@ -364,7 +364,7 @@ async fn rewind_exact_multiple_of_epoch_creates_full_batches_in_reverse() -> any provider.anvil_mine(Some(15), None).await?; - let robust_provider = RobustProvider::new(provider.root().to_owned()); + let robust_provider = RobustProvider::new(provider); let client = BlockRangeScanner::new().max_block_range(5).connect(robust_provider).run()?; let mut stream = client.rewind(0, 14).await?; @@ -386,7 +386,7 @@ async fn rewind_with_remainder_trims_first_batch_to_stream_start() -> anyhow::Re provider.anvil_mine(Some(15), None).await?; - let robust_provider = RobustProvider::new(provider.root().to_owned()); + let robust_provider = RobustProvider::new(provider); let client = BlockRangeScanner::new().max_block_range(4).connect(robust_provider).run()?; let mut stream = client.rewind(3, 12).await?; @@ -408,7 +408,7 @@ async fn rewind_single_block_range() -> anyhow::Result<()> { provider.anvil_mine(Some(15), None).await?; - let robust_provider = RobustProvider::new(provider.root().to_owned()); + let robust_provider = RobustProvider::new(provider); let client = BlockRangeScanner::new().max_block_range(5).connect(robust_provider).run()?; let mut stream = client.rewind(7, 7).await?; @@ -427,7 +427,7 @@ async fn rewind_epoch_of_one_sends_each_block_in_reverse_order() -> anyhow::Resu provider.anvil_mine(Some(15), None).await?; - let robust_provider = RobustProvider::new(provider.root().to_owned()); + let robust_provider = RobustProvider::new(provider); let client = BlockRangeScanner::new().max_block_range(1).connect(robust_provider).run()?; let mut stream = client.rewind(5, 8).await?; @@ -450,7 +450,7 @@ async fn command_rewind_defaults_latest_to_earliest_batches_correctly() -> anyho // Mine 20 blocks, so the total number of blocks is 21 (including 0th block) provider.anvil_mine(Some(20), None).await?; - let robust_provider = RobustProvider::new(provider.root().to_owned()); + let robust_provider = RobustProvider::new(provider); let client = BlockRangeScanner::new().max_block_range(7).connect(robust_provider).run()?; let mut stream = client.rewind(BlockNumberOrTag::Earliest, BlockNumberOrTag::Latest).await?; @@ -471,7 +471,7 @@ async fn command_rewind_handles_start_and_end_in_any_order() -> anyhow::Result<( // Ensure blocks at 3 and 15 exist provider.anvil_mine(Some(16), None).await?; - let robust_provider = RobustProvider::new(provider.root().to_owned()); + let robust_provider = RobustProvider::new(provider); let client = BlockRangeScanner::new().max_block_range(5).connect(robust_provider).run()?; let mut stream = client.rewind(15, 3).await?; @@ -497,7 +497,7 @@ async fn command_rewind_propagates_block_not_found_error() -> anyhow::Result<()> // Do not mine up to 999 so start won't exist let provider = ProviderBuilder::new().connect(anvil.endpoint().as_str()).await?; - let robust_provider = RobustProvider::new(provider.root().to_owned()); + let robust_provider = RobustProvider::new(provider); let client = 
BlockRangeScanner::new().max_block_range(5).connect(robust_provider).run()?;

     let stream = client.rewind(0, 999).await;

diff --git a/tests/common/mod.rs b/tests/common/mod.rs
index 1ce4d2fd..b8178c1c 100644
--- a/tests/common/mod.rs
+++ b/tests/common/mod.rs
@@ -11,10 +11,7 @@ pub(crate) use setup_scanner::{
 };
 pub(crate) use test_counter::{TestCounter, TestCounterExt, deploy_counter};

-use alloy::{
-    network::Ethereum,
-    providers::{Provider, ProviderBuilder},
-};
+use alloy::{network::Ethereum, providers::ProviderBuilder};
 use alloy_node_bindings::{Anvil, AnvilInstance};
 use event_scanner::robust_provider::RobustProvider;

@@ -30,6 +27,6 @@ pub async fn build_provider(anvil: &AnvilInstance) -> anyhow::Result<RobustProvider<Ethereum>> {
-    Ok(RobustProvider::new(provider.root().to_owned()))
+    Ok(RobustProvider::new(provider))
 }

From: Leo
Date: Tue, 4 Nov 2025 20:54:36 +0900
Subject: [PATCH 093/122] ref: remove Ethereum generic

---
 examples/historical_scanning/main.rs       |  4 ++--
 examples/latest_events_scanning/main.rs    |  4 ++--
 examples/live_scanning/main.rs             |  4 ++--
 examples/sync_from_block_scanning/main.rs  |  5 ++---
 examples/sync_from_latest_scanning/main.rs |  5 ++---
 src/event_scanner/scanner/mod.rs           | 12 ++++--------
 tests/common/setup_scanner.rs              | 17 +++++++----------
 tests/latest_events/basic.rs               | 19 ++++++-------------
 8 files changed, 27 insertions(+), 43 deletions(-)

diff --git a/examples/historical_scanning/main.rs b/examples/historical_scanning/main.rs
index 84db6c29..c1ff1d48 100644
--- a/examples/historical_scanning/main.rs
+++ b/examples/historical_scanning/main.rs
@@ -1,4 +1,4 @@
-use alloy::{network::Ethereum, providers::ProviderBuilder, sol, sol_types::SolEvent};
+use alloy::{providers::ProviderBuilder, sol, sol_types::SolEvent};
 use alloy_node_bindings::Anvil;

 use event_scanner::{EventFilter, EventScannerBuilder, Message, robust_provider::RobustProvider};
@@ -53,7 +53,7 @@ async fn main() -> anyhow::Result<()> {
     let _ = counter_contract.increase().send().await?.get_receipt().await?;

     let robust_provider = RobustProvider::new(provider.clone());
-    let mut scanner = EventScannerBuilder::historic().connect::<Ethereum>(robust_provider);
+    let mut scanner = EventScannerBuilder::historic().connect(robust_provider);

     let mut stream = scanner.subscribe(increase_filter);

diff --git a/examples/latest_events_scanning/main.rs b/examples/latest_events_scanning/main.rs
index 22744b5f..fae0f04f 100644
--- a/examples/latest_events_scanning/main.rs
+++ b/examples/latest_events_scanning/main.rs
@@ -1,4 +1,4 @@
-use alloy::{network::Ethereum, providers::ProviderBuilder, sol, sol_types::SolEvent};
+use alloy::{providers::ProviderBuilder, sol, sol_types::SolEvent};
 use alloy_node_bindings::Anvil;
 use event_scanner::{EventFilter, EventScannerBuilder, Message, robust_provider::RobustProvider};
 use tokio_stream::StreamExt;
@@ -50,7 +50,7 @@ async fn main() -> anyhow::Result<()> {
         .event(Counter::CountIncreased::SIGNATURE);

     let robust_provider = RobustProvider::new(provider.clone());
-    let mut scanner = EventScannerBuilder::latest(5).connect::<Ethereum>(robust_provider);
+    let mut scanner = EventScannerBuilder::latest(5).connect(robust_provider);

     let mut stream = scanner.subscribe(increase_filter);

diff --git a/examples/live_scanning/main.rs b/examples/live_scanning/main.rs
index 098ce1f8..42f57b1f 100644
--- a/examples/live_scanning/main.rs
+++ b/examples/live_scanning/main.rs
@@ -1,4 +1,4 @@
-use alloy::{network::Ethereum, providers::ProviderBuilder, sol, sol_types::SolEvent};
+use alloy::{providers::ProviderBuilder, sol, sol_types::SolEvent};
 use alloy_node_bindings::Anvil;

 use event_scanner::{EventFilter, EventScannerBuilder, Message,
robust_provider::RobustProvider}; @@ -51,7 +51,7 @@ async fn main() -> anyhow::Result<()> { .event(Counter::CountIncreased::SIGNATURE); let robust_provider = RobustProvider::new(provider.clone()); - let mut scanner = EventScannerBuilder::live().connect::(robust_provider); + let mut scanner = EventScannerBuilder::live().connect(robust_provider); let mut stream = scanner.subscribe(increase_filter); diff --git a/examples/sync_from_block_scanning/main.rs b/examples/sync_from_block_scanning/main.rs index aba1c518..d9c270fb 100644 --- a/examples/sync_from_block_scanning/main.rs +++ b/examples/sync_from_block_scanning/main.rs @@ -1,6 +1,6 @@ use std::time::Duration; -use alloy::{network::Ethereum, providers::ProviderBuilder, sol, sol_types::SolEvent}; +use alloy::{providers::ProviderBuilder, sol, sol_types::SolEvent}; use alloy_node_bindings::Anvil; use event_scanner::{EventFilter, EventScannerBuilder, Message, robust_provider::RobustProvider}; use tokio::time::sleep; @@ -59,8 +59,7 @@ async fn main() -> anyhow::Result<()> { } let robust_provider = RobustProvider::new(provider.clone()); - let mut scanner = - EventScannerBuilder::sync().from_block(0).connect::(robust_provider); + let mut scanner = EventScannerBuilder::sync().from_block(0).connect(robust_provider); let mut stream = scanner.subscribe(increase_filter); diff --git a/examples/sync_from_latest_scanning/main.rs b/examples/sync_from_latest_scanning/main.rs index 00d9d1da..554da2b5 100644 --- a/examples/sync_from_latest_scanning/main.rs +++ b/examples/sync_from_latest_scanning/main.rs @@ -1,4 +1,4 @@ -use alloy::{network::Ethereum, providers::ProviderBuilder, sol, sol_types::SolEvent}; +use alloy::{providers::ProviderBuilder, sol, sol_types::SolEvent}; use alloy_node_bindings::Anvil; use event_scanner::{EventFilter, EventScannerBuilder, Message, robust_provider::RobustProvider}; @@ -51,8 +51,7 @@ async fn main() -> anyhow::Result<()> { .event(Counter::CountIncreased::SIGNATURE); let robust_provider = RobustProvider::new(provider.clone()); - let mut client = - EventScannerBuilder::sync().from_latest(5).connect::(robust_provider); + let mut client = EventScannerBuilder::sync().from_latest(5).connect(robust_provider); let mut stream = client.subscribe(increase_filter); diff --git a/src/event_scanner/scanner/mod.rs b/src/event_scanner/scanner/mod.rs index fc80f03b..a425d652 100644 --- a/src/event_scanner/scanner/mod.rs +++ b/src/event_scanner/scanner/mod.rs @@ -424,10 +424,10 @@ mod tests { } #[tokio::test] - async fn test_historic_event_stream_listeners_vector_updates() -> anyhow::Result<()> { + async fn test_historic_event_stream_listeners_vector_updates() { let provider = RootProvider::::new(RpcClient::mocked(Asserter::new())); let robust_provider = RobustProvider::new(provider.clone()); - let mut scanner = EventScannerBuilder::historic().connect::(robust_provider); + let mut scanner = EventScannerBuilder::historic().connect(robust_provider); assert!(scanner.listeners.is_empty()); @@ -437,21 +437,17 @@ mod tests { let _stream2 = scanner.subscribe(EventFilter::new()); let _stream3 = scanner.subscribe(EventFilter::new()); assert_eq!(scanner.listeners.len(), 3); - - Ok(()) } #[tokio::test] - async fn test_historic_event_stream_channel_capacity() -> anyhow::Result<()> { + async fn test_historic_event_stream_channel_capacity() { let provider = RootProvider::::new(RpcClient::mocked(Asserter::new())); let robust_provider = RobustProvider::new(provider.clone()); - let mut scanner = EventScannerBuilder::historic().connect::(robust_provider); + let 
mut scanner = EventScannerBuilder::historic().connect(robust_provider); let _ = scanner.subscribe(EventFilter::new()); let sender = &scanner.listeners[0].sender; assert_eq!(sender.capacity(), MAX_BUFFERED_MESSAGES); - - Ok(()) } } diff --git a/tests/common/setup_scanner.rs b/tests/common/setup_scanner.rs index f770f36f..ec838e1b 100644 --- a/tests/common/setup_scanner.rs +++ b/tests/common/setup_scanner.rs @@ -63,9 +63,8 @@ pub async fn setup_live_scanner( ) -> anyhow::Result + Clone>> { let (anvil, provider, contract, filter) = setup_common(block_interval, filter).await?; - let mut scanner = EventScannerBuilder::live() - .block_confirmations(confirmations) - .connect::(provider.clone()); + let mut scanner = + EventScannerBuilder::live().block_confirmations(confirmations).connect(provider.clone()); let stream = scanner.subscribe(filter); @@ -83,7 +82,7 @@ pub async fn setup_sync_scanner( let mut scanner = EventScannerBuilder::sync() .from_block(from) .block_confirmations(confirmations) - .connect::(provider.clone()); + .connect(provider.clone()); let stream = scanner.subscribe(filter); @@ -101,7 +100,7 @@ pub async fn setup_sync_from_latest_scanner( let mut scanner = EventScannerBuilder::sync() .from_latest(latest) .block_confirmations(confirmations) - .connect::(provider.clone()); + .connect(provider.clone()); let stream = scanner.subscribe(filter); @@ -115,10 +114,8 @@ pub async fn setup_historic_scanner( to: BlockNumberOrTag, ) -> anyhow::Result + Clone>> { let (anvil, provider, contract, filter) = setup_common(block_interval, filter).await?; - let mut scanner = EventScannerBuilder::historic() - .from_block(from) - .to_block(to) - .connect::(provider.clone()); + let mut scanner = + EventScannerBuilder::historic().from_block(from).to_block(to).connect(provider.clone()); let stream = scanner.subscribe(filter); @@ -141,7 +138,7 @@ pub async fn setup_latest_scanner( builder = builder.to_block(t); } - let mut scanner = builder.connect::(provider.clone()); + let mut scanner = builder.connect(provider.clone()); let stream = scanner.subscribe(filter); diff --git a/tests/latest_events/basic.rs b/tests/latest_events/basic.rs index 522c5694..162fe4fd 100644 --- a/tests/latest_events/basic.rs +++ b/tests/latest_events/basic.rs @@ -1,6 +1,5 @@ use alloy::{ eips::BlockNumberOrTag, - network::Ethereum, providers::{Provider, ext::AnvilApi}, sol_types::SolEvent, }; @@ -102,10 +101,8 @@ async fn latest_scanner_respects_range_subset() -> anyhow::Result<()> { let start = BlockNumberOrTag::from(head - 3); let end = BlockNumberOrTag::from(head); - let mut scanner_with_range = EventScannerBuilder::latest(10) - .from_block(start) - .to_block(end) - .connect::(provider); + let mut scanner_with_range = + EventScannerBuilder::latest(10).from_block(start).to_block(end).connect(provider); let mut stream_with_range = scanner_with_range.subscribe(default_filter); scanner_with_range.start().await?; @@ -297,10 +294,8 @@ async fn latest_scanner_large_gaps_and_empty_ranges() -> anyhow::Result<()> { let start = BlockNumberOrTag::from(head - 12); let end = BlockNumberOrTag::from(head); - let mut scanner_with_range = EventScannerBuilder::latest(5) - .from_block(start) - .to_block(end) - .connect::(provider); + let mut scanner_with_range = + EventScannerBuilder::latest(5).from_block(start).to_block(end).connect(provider); let mut stream_with_range = scanner_with_range.subscribe(default_filter); scanner_with_range.start().await?; @@ -330,10 +325,8 @@ async fn latest_scanner_boundary_range_single_block() -> anyhow::Result<()> 
{ .unwrap(); let end = start; - let mut scanner_with_range = EventScannerBuilder::latest(5) - .from_block(start) - .to_block(end) - .connect::(provider); + let mut scanner_with_range = + EventScannerBuilder::latest(5).from_block(start).to_block(end).connect(provider); let mut stream_with_range = scanner_with_range.subscribe(default_filter); scanner_with_range.start().await?; From 7cae13bfac093d733e5debbdceebe7d2f5890ad7 Mon Sep 17 00:00:00 2001 From: Leo Date: Tue, 4 Nov 2025 20:56:41 +0900 Subject: [PATCH 094/122] ref: remove is_err check --- src/robust_provider.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/robust_provider.rs b/src/robust_provider.rs index 2b44c339..9de4d7f3 100644 --- a/src/robust_provider.rs +++ b/src/robust_provider.rs @@ -228,7 +228,7 @@ impl RobustProvider { return Ok(value); } - if result.is_err() && self.fallback_providers.is_empty() { + if self.fallback_providers.is_empty() { return result; } From a61670395caf140a91828c56c65eedfaef505a6c Mon Sep 17 00:00:00 2001 From: Leo Date: Tue, 4 Nov 2025 20:59:33 +0900 Subject: [PATCH 095/122] ref: change inner --> root --- src/robust_provider.rs | 6 +++--- tests/block_range_scanner.rs | 14 +++++++------- tests/common/setup_scanner.rs | 2 +- tests/latest_events/basic.rs | 10 +++++----- tests/live/basic.rs | 2 +- tests/live/reorg.rs | 14 +++++++------- tests/sync/from_block.rs | 8 ++++---- tests/sync/from_latest.rs | 4 ++-- 8 files changed, 30 insertions(+), 30 deletions(-) diff --git a/src/robust_provider.rs b/src/robust_provider.rs index 9de4d7f3..42ab1544 100644 --- a/src/robust_provider.rs +++ b/src/robust_provider.rs @@ -81,9 +81,9 @@ impl RobustProvider { self } - /// Get a reference to the inner provider + /// Get a reference to the primary provider #[must_use] - pub fn inner(&self) -> &RootProvider { + pub fn root(&self) -> &RootProvider { &self.provider } @@ -91,7 +91,7 @@ impl RobustProvider { /// /// Fallback providers are used when the primary provider times out. 
#[must_use] - pub fn fallback_provider(mut self, provider: RootProvider) -> Self { + pub fn fallback(mut self, provider: RootProvider) -> Self { self.fallback_providers.push(provider); self } diff --git a/tests/block_range_scanner.rs b/tests/block_range_scanner.rs index 573131f0..1055e28c 100644 --- a/tests/block_range_scanner.rs +++ b/tests/block_range_scanner.rs @@ -23,7 +23,7 @@ async fn live_mode_processes_all_blocks_respecting_block_confirmations() -> anyh let mut stream = client.stream_live(0).await?; - robust_provider.inner().anvil_mine(Some(5), None).await?; + robust_provider.root().anvil_mine(Some(5), None).await?; assert_next!(stream, 1..=1); assert_next!(stream, 2..=2); @@ -32,7 +32,7 @@ async fn live_mode_processes_all_blocks_respecting_block_confirmations() -> anyh assert_next!(stream, 5..=5); let mut stream = assert_empty!(stream); - robust_provider.inner().anvil_mine(Some(1), None).await?; + robust_provider.root().anvil_mine(Some(1), None).await?; assert_next!(stream, 6..=6); assert_empty!(stream); @@ -41,7 +41,7 @@ async fn live_mode_processes_all_blocks_respecting_block_confirmations() -> anyh let mut stream = client.stream_live(1).await?; - robust_provider.inner().anvil_mine(Some(5), None).await?; + robust_provider.root().anvil_mine(Some(5), None).await?; assert_next!(stream, 6..=6); assert_next!(stream, 7..=7); @@ -50,7 +50,7 @@ async fn live_mode_processes_all_blocks_respecting_block_confirmations() -> anyh assert_next!(stream, 10..=10); let mut stream = assert_empty!(stream); - robust_provider.inner().anvil_mine(Some(1), None).await?; + robust_provider.root().anvil_mine(Some(1), None).await?; assert_next!(stream, 11..=11); assert_empty!(stream); @@ -75,16 +75,16 @@ async fn stream_from_latest_starts_at_tip_not_confirmed() -> anyhow::Result<()> let stream = assert_empty!(stream); - robust_provider.inner().anvil_mine(Some(4), None).await?; + robust_provider.root().anvil_mine(Some(4), None).await?; let mut stream = assert_empty!(stream); - robust_provider.inner().anvil_mine(Some(1), None).await?; + robust_provider.root().anvil_mine(Some(1), None).await?; assert_next!(stream, 20..=20); let mut stream = assert_empty!(stream); - robust_provider.inner().anvil_mine(Some(1), None).await?; + robust_provider.root().anvil_mine(Some(1), None).await?; assert_next!(stream, 21..=21); assert_empty!(stream); diff --git a/tests/common/setup_scanner.rs b/tests/common/setup_scanner.rs index ec838e1b..8c70601d 100644 --- a/tests/common/setup_scanner.rs +++ b/tests/common/setup_scanner.rs @@ -46,7 +46,7 @@ pub async fn setup_common( )> { let anvil = spawn_anvil(block_interval)?; let provider = build_provider(&anvil).await?; - let contract = deploy_counter(provider.inner().clone()).await?; + let contract = deploy_counter(provider.root().clone()).await?; let default_filter = EventFilter::new().contract_address(*contract.address()).event(CountIncreased::SIGNATURE); diff --git a/tests/latest_events/basic.rs b/tests/latest_events/basic.rs index 162fe4fd..bad46f3a 100644 --- a/tests/latest_events/basic.rs +++ b/tests/latest_events/basic.rs @@ -94,7 +94,7 @@ async fn latest_scanner_respects_range_subset() -> anyhow::Result<()> { expected.push(contract.increase_and_get_meta().await?); // manual empty block minting - provider.inner().anvil_mine(Some(2), None).await?; + provider.root().anvil_mine(Some(2), None).await?; let head = provider.get_block_number().await?; // Choose a subrange covering last 4 blocks @@ -249,8 +249,8 @@ async fn latest_scanner_cross_contract_filtering() -> anyhow::Result<()> { 
let provider = setup.provider; let mut scanner = setup.scanner; - let contract_a = deploy_counter(provider.inner().clone()).await?; - let contract_b = deploy_counter(provider.inner().clone()).await?; + let contract_a = deploy_counter(provider.root().clone()).await?; + let contract_b = deploy_counter(provider.root().clone()).await?; // Listener only for contract A CountIncreased let filter_a = EventFilter::new() @@ -286,7 +286,7 @@ async fn latest_scanner_large_gaps_and_empty_ranges() -> anyhow::Result<()> { log_meta.push(contract.increase_and_get_meta().await?); // Mine 10 empty blocks - provider.inner().anvil_mine(Some(10), None).await?; + provider.root().anvil_mine(Some(10), None).await?; // Emit 1 more event log_meta.push(contract.increase_and_get_meta().await?); @@ -317,7 +317,7 @@ async fn latest_scanner_boundary_range_single_block() -> anyhow::Result<()> { // Pick the expected tx's block number as the block range let expected_tx_hash = expected[0].tx_hash; let start = provider - .inner() + .root() .get_transaction_by_hash(expected_tx_hash) .await? .map(|t| t.block_number.unwrap()) diff --git a/tests/live/basic.rs b/tests/live/basic.rs index 7253238e..829012aa 100644 --- a/tests/live/basic.rs +++ b/tests/live/basic.rs @@ -65,7 +65,7 @@ async fn multiple_contracts_same_event_isolate_callbacks() -> anyhow::Result<()> let setup = setup_live_scanner(Some(0.1), None, 0).await?; let provider = setup.provider.clone(); let a = setup.contract.clone(); - let b = deploy_counter(provider.inner().clone()).await?; + let b = deploy_counter(provider.root().clone()).await?; let a_filter = EventFilter::new() .contract_address(*a.address()) diff --git a/tests/live/reorg.rs b/tests/live/reorg.rs index 6532b4cf..842068a3 100644 --- a/tests/live/reorg.rs +++ b/tests/live/reorg.rs @@ -35,7 +35,7 @@ async fn reorg_rescans_events_within_same_block() -> anyhow::Result<()> { (TransactionData::JSON(contract.increase().into_transaction_request()), 0), ]; - provider.inner().anvil_reorg(ReorgOptions { depth: 4, tx_block_pairs }).await?; + provider.root().anvil_reorg(ReorgOptions { depth: 4, tx_block_pairs }).await?; // assert expected messages post-reorg assert_next!(stream, ScannerStatus::ReorgDetected); @@ -79,7 +79,7 @@ async fn reorg_rescans_events_with_ascending_blocks() -> anyhow::Result<()> { (TransactionData::JSON(contract.increase().into_transaction_request()), 2), ]; - provider.inner().anvil_reorg(ReorgOptions { depth: 4, tx_block_pairs }).await?; + provider.root().anvil_reorg(ReorgOptions { depth: 4, tx_block_pairs }).await?; // assert expected messages post-reorg assert_next!(stream, ScannerStatus::ReorgDetected); @@ -114,7 +114,7 @@ async fn reorg_depth_one() -> anyhow::Result<()> { let tx_block_pairs = vec![(TransactionData::JSON(contract.increase().into_transaction_request()), 0)]; - provider.inner().anvil_reorg(ReorgOptions { depth: 1, tx_block_pairs }).await?; + provider.root().anvil_reorg(ReorgOptions { depth: 1, tx_block_pairs }).await?; // assert expected messages post-reorg assert_next!(stream, ScannerStatus::ReorgDetected); @@ -147,7 +147,7 @@ async fn reorg_depth_two() -> anyhow::Result<()> { let tx_block_pairs = vec![(TransactionData::JSON(contract.increase().into_transaction_request()), 0)]; - provider.inner().anvil_reorg(ReorgOptions { depth: 2, tx_block_pairs }).await?; + provider.root().anvil_reorg(ReorgOptions { depth: 2, tx_block_pairs }).await?; // assert expected messages post-reorg assert_next!(stream, ScannerStatus::ReorgDetected); @@ -166,7 +166,7 @@ async fn 
block_confirmations_mitigate_reorgs() -> anyhow::Result<()> { scanner.start().await?; // mine some initial blocks - provider.inner().anvil_mine(Some(10), None).await?; + provider.root().anvil_mine(Some(10), None).await?; // emit initial events for _ in 0..4 { @@ -184,13 +184,13 @@ async fn block_confirmations_mitigate_reorgs() -> anyhow::Result<()> { (TransactionData::JSON(contract.increase().into_transaction_request()), 0), ]; - provider.inner().anvil_reorg(ReorgOptions { depth: 2, tx_block_pairs }).await?; + provider.root().anvil_reorg(ReorgOptions { depth: 2, tx_block_pairs }).await?; // assert no events have still been streamed let mut stream = assert_empty!(stream); // mine some additional post-reorg blocks - provider.inner().anvil_mine(Some(10), None).await?; + provider.root().anvil_mine(Some(10), None).await?; // no `ReorgDetected` should be emitted assert_next!(stream, &[CountIncreased { newCount: U256::from(1) }]); diff --git a/tests/sync/from_block.rs b/tests/sync/from_block.rs index 00aa1c0e..92f50624 100644 --- a/tests/sync/from_block.rs +++ b/tests/sync/from_block.rs @@ -85,7 +85,7 @@ async fn block_confirmations_mitigate_reorgs_historic_to_live() -> anyhow::Resul let contract = setup.contract.clone(); // mine some blocks to establish a baseline - provider.clone().inner().anvil_mine(Some(10), None).await?; + provider.clone().root().anvil_mine(Some(10), None).await?; let scanner = setup.scanner; let mut stream = setup.stream; @@ -98,7 +98,7 @@ async fn block_confirmations_mitigate_reorgs_historic_to_live() -> anyhow::Resul } // mine enough blocks to confirm first 2 events (but not events 3 and 4) - provider.clone().inner().anvil_mine(Some(7), None).await?; + provider.clone().root().anvil_mine(Some(7), None).await?; // assert first 2 events are emitted (confirmed) assert_next!(stream, ScannerStatus::SwitchingToLive); @@ -113,10 +113,10 @@ async fn block_confirmations_mitigate_reorgs_historic_to_live() -> anyhow::Resul (TransactionData::JSON(contract.increase().into_transaction_request()), 1), ]; - provider.clone().inner().anvil_reorg(ReorgOptions { depth: 2, tx_block_pairs }).await?; + provider.clone().root().anvil_reorg(ReorgOptions { depth: 2, tx_block_pairs }).await?; // mine enough blocks to confirm the new events - provider.clone().inner().anvil_mine(Some(10), None).await?; + provider.clone().root().anvil_mine(Some(10), None).await?; // assert the new events from the reorged chain // No ReorgDetected should be emitted because the reorg happened before confirmation diff --git a/tests/sync/from_latest.rs b/tests/sync/from_latest.rs index f0d8eb5f..65f8926d 100644 --- a/tests/sync/from_latest.rs +++ b/tests/sync/from_latest.rs @@ -126,12 +126,12 @@ async fn scan_latest_then_live_boundary_no_duplication() -> anyhow::Result<()> { let mut expected_latest = vec![]; expected_latest.push(contract.increase_and_get_meta().await?); - provider.inner().anvil_mine(Some(1), None).await?; + provider.root().anvil_mine(Some(1), None).await?; expected_latest.push(contract.increase_and_get_meta().await?); expected_latest.push(contract.increase_and_get_meta().await?); - provider.inner().anvil_mine(Some(1), None).await?; + provider.root().anvil_mine(Some(1), None).await?; scanner.start().await?; From f3958afbbac45fdcff50809062bb97003c4950bb Mon Sep 17 00:00:00 2001 From: Leo Date: Tue, 4 Nov 2025 21:07:00 +0900 Subject: [PATCH 096/122] feat: use providers vec to store all primary and fallback providers --- src/robust_provider.rs | 78 ++++++++++++++++++++++-------------------- 1 file changed, 40 
insertions(+), 38 deletions(-) diff --git a/src/robust_provider.rs b/src/robust_provider.rs index 42ab1544..7bc34629 100644 --- a/src/robust_provider.rs +++ b/src/robust_provider.rs @@ -33,13 +33,13 @@ impl From> for Error { /// /// This wrapper around Alloy providers automatically handles retries, /// timeouts, and error logging for RPC calls. +/// The first provider in the vector is treated as the primary provider. #[derive(Clone)] pub struct RobustProvider { - provider: RootProvider, + providers: Vec>, max_timeout: Duration, max_retries: usize, retry_interval: Duration, - fallback_providers: Vec>, } // RPC retry and timeout settings @@ -52,14 +52,14 @@ pub const DEFAULT_RETRY_INTERVAL: Duration = Duration::from_secs(1); impl RobustProvider { /// Create a new `RobustProvider` with default settings. + /// The provided provider is treated as the primary provider. #[must_use] pub fn new(provider: impl Provider) -> Self { Self { - provider: provider.root().to_owned(), + providers: vec![provider.root().to_owned()], max_timeout: DEFAULT_MAX_TIMEOUT, max_retries: DEFAULT_MAX_RETRIES, retry_interval: DEFAULT_RETRY_INTERVAL, - fallback_providers: Vec::new(), } } @@ -81,18 +81,23 @@ impl RobustProvider { self } - /// Get a reference to the primary provider + /// Get a reference to the primary provider (the first provider in the list) + /// + /// # Panics + /// + /// If there are no providers set (this should never happen) #[must_use] pub fn root(&self) -> &RootProvider { - &self.provider + // Safe to unwrap because we always have at least one provider + self.providers.first().expect("providers vector should never be empty") } /// Add a fallback provider to the list. /// - /// Fallback providers are used when the primary provider times out. + /// Fallback providers are used when the primary provider times out or fails. 
#[must_use] pub fn fallback(mut self, provider: RootProvider) -> Self { - self.fallback_providers.push(provider); + self.providers.push(provider); self } @@ -189,7 +194,7 @@ impl RobustProvider { pub async fn subscribe_blocks(&self) -> Result, Error> { info!("eth_subscribe called"); // We need this otherwise error is not clear - self.provider.client().expect_pubsub_frontend(); + self.root().client().expect_pubsub_frontend(); let result = self .retry_with_total_timeout( move |provider| async move { provider.subscribe_blocks().await }, @@ -221,43 +226,41 @@ impl RobustProvider { F: Fn(RootProvider) -> Fut, Fut: Future>>, { - // Try primary provider first - let result = self.try_provider_with_timeout(&self.provider, &operation).await; - - if let Ok(value) = result { - return Ok(value); - } - - if self.fallback_providers.is_empty() { - return result; - } - - info!("Primary provider failed, trying fallback provider(s)"); - - // Try each fallback provider - for (idx, fallback_provider) in self.fallback_providers.iter().enumerate() { - info!( - "Attempting fallback provider {} out of {}", - idx + 1, - self.fallback_providers.len() - ); + let mut last_error = None; + + // Try each provider in sequence (first one is primary) + for (idx, provider) in self.providers.iter().enumerate() { + if idx == 0 { + info!("Attempting primary provider"); + } else { + info!("Attempting fallback provider {} out of {}", idx, self.providers.len() - 1); + } - let fallback_result = - self.try_provider_with_timeout(fallback_provider, &operation).await; + let result = self.try_provider_with_timeout(provider, &operation).await; - match fallback_result { + match result { Ok(value) => { - info!(provider_num = idx + 1, "Fallback provider succeeded"); + if idx > 0 { + info!(provider_num = idx, "Fallback provider succeeded"); + } return Ok(value); } Err(e) => { - error!(provider_num = idx + 1, err = %e, "Fallback provider failed with error"); + last_error = Some(e); + if idx == 0 { + if self.providers.len() > 1 { + info!("Primary provider failed, trying fallback provider(s)"); + } + } else { + error!(provider_num = idx, err = %last_error.as_ref().unwrap(), "Fallback provider failed with error"); + } } } } - error!("All fallback providers failed or timed out"); - Err(Error::Timeout) + error!("All providers failed or timed out"); + // Return the last error encountered + Err(last_error.unwrap_or(Error::Timeout)) } /// Try executing an operation with a specific provider with retry and timeout. 
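For orientation, this is roughly how a consumer would wire up a primary plus a fallback against the API as it stands in this patch; the endpoint URLs are placeholders and the `Ethereum` network is assumed (a sketch, not part of the series):

```rust
use alloy::{
    network::Ethereum,
    providers::{Provider, ProviderBuilder, WsConnect},
};
use event_scanner::robust_provider::RobustProvider;
use std::time::Duration;

async fn wire_up() -> anyhow::Result<RobustProvider<Ethereum>> {
    // Placeholder endpoints; any provider can serve as the primary.
    let primary = ProviderBuilder::new()
        .connect_ws(WsConnect::new("wss://primary.example"))
        .await?;
    let backup = ProviderBuilder::new()
        .connect_ws(WsConnect::new("wss://backup.example"))
        .await?;
    // `new` stores the primary at providers[0]; each `fallback` call
    // appends, so failover order follows call order.
    Ok(RobustProvider::new(primary)
        .max_timeout(Duration::from_secs(30))
        .fallback(backup.root().to_owned()))
}
```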
@@ -299,11 +302,10 @@ mod tests { retry_interval: u64, ) -> RobustProvider { RobustProvider { - provider: RootProvider::new_http("http://localhost:8545".parse().unwrap()), + providers: vec![RootProvider::new_http("http://localhost:8545".parse().unwrap())], max_timeout: Duration::from_millis(timeout), max_retries, retry_interval: Duration::from_millis(retry_interval), - fallback_providers: Vec::new(), } } From 8b1079cc306be118b82a48fc16724ec5c39d5ae6 Mon Sep 17 00:00:00 2001 From: Leo Date: Tue, 4 Nov 2025 21:12:16 +0900 Subject: [PATCH 097/122] feat: add retry notification --- src/robust_provider.rs | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/src/robust_provider.rs b/src/robust_provider.rs index 7bc34629..023bb21d 100644 --- a/src/robust_provider.rs +++ b/src/robust_provider.rs @@ -279,7 +279,12 @@ impl RobustProvider { match timeout( self.max_timeout, - (|| operation(provider.clone())).retry(retry_strategy).sleep(tokio::time::sleep), + (|| operation(provider.clone())) + .retry(retry_strategy) + .notify(|err: &RpcError, dur: Duration| { + info!(error = %err, "RPC error retrying after {:?}", dur); + }) + .sleep(tokio::time::sleep), ) .await { From ef3b363583a7d83feebfdc5fa6489058e209b0cb Mon Sep 17 00:00:00 2001 From: Leo Date: Wed, 5 Nov 2025 23:04:21 +0900 Subject: [PATCH 098/122] fix: test --- tests/block_range_scanner.rs | 38 ++++++++++++++---------------------- tests/sync/from_block.rs | 18 +++++++++++++++-- 2 files changed, 31 insertions(+), 25 deletions(-) diff --git a/tests/block_range_scanner.rs b/tests/block_range_scanner.rs index fb02ac52..1d3cb447 100644 --- a/tests/block_range_scanner.rs +++ b/tests/block_range_scanner.rs @@ -1,14 +1,12 @@ use alloy::{ eips::{BlockId, BlockNumberOrTag}, - network::Ethereum, - providers::{Provider, ProviderBuilder, ext::AnvilApi}, + providers::{ProviderBuilder, ext::AnvilApi}, rpc::types::anvil::ReorgOptions, }; use alloy_node_bindings::Anvil; use event_scanner::{ ScannerError, ScannerStatus, assert_closed, assert_empty, assert_next, - block_range_scanner::{BlockRangeScanner, Message}, - robust_provider::RobustProvider, + block_range_scanner::BlockRangeScanner, robust_provider::RobustProvider, }; #[tokio::test] @@ -63,27 +61,23 @@ async fn stream_from_latest_starts_at_tip_not_confirmed() -> anyhow::Result<()> let anvil = Anvil::new().try_spawn()?; let provider = ProviderBuilder::new().connect(anvil.ws_endpoint_url().as_str()).await?; - provider.anvil_mine(Some(20), None).await?; - let robust_provider = RobustProvider::new(provider); - let client = BlockRangeScanner::new().connect(robust_provider.clone()).run()?; + robust_provider.root().anvil_mine(Some(20), None).await?; + let stream = client.stream_from(BlockNumberOrTag::Latest, 5).await?; let stream = assert_empty!(stream); robust_provider.root().anvil_mine(Some(4), None).await?; - let mut stream = assert_empty!(stream); robust_provider.root().anvil_mine(Some(1), None).await?; - assert_next!(stream, 20..=20); let mut stream = assert_empty!(stream); robust_provider.root().anvil_mine(Some(1), None).await?; - assert_next!(stream, 21..=21); assert_empty!(stream); @@ -95,16 +89,13 @@ async fn continuous_blocks_if_reorg_less_than_block_confirmation() -> anyhow::Re let anvil = Anvil::new().try_spawn()?; let provider = ProviderBuilder::new().connect(anvil.ws_endpoint_url().as_str()).await?; - let provider = ProviderBuilder::new().connect(anvil.ws_endpoint().as_str()).await?; - - let robust_provider = RobustProvider::new(provider.clone()); - - let client = 
BlockRangeScanner::new().connect(robust_provider).run()?;
+    let robust_provider = RobustProvider::new(provider);
+    let client = BlockRangeScanner::new().connect(robust_provider.clone()).run()?;
 
-    let mut receiver = client.stream_live(5).await?;
+    let mut stream = client.stream_live(5).await?;
 
     // mine initial blocks
-    provider.anvil_mine(Some(10), None).await?;
+    robust_provider.root().anvil_mine(Some(10), None).await?;
 
     // assert initial block ranges immediately to avoid Anvil race condition:
     //
@@ -119,9 +110,9 @@ async fn continuous_blocks_if_reorg_less_than_block_confirmation() -> anyhow::Re
     assert_next!(stream, 5..=5);
 
     // reorg less blocks than the block_confirmation config
-    provider.anvil_reorg(ReorgOptions { depth: 4, tx_block_pairs: vec![] }).await?;
+    robust_provider.root().anvil_reorg(ReorgOptions { depth: 4, tx_block_pairs: vec![] }).await?;
     // mine additional blocks so the scanner processes reorged blocks
-    provider.anvil_mine(Some(5), None).await?;
+    robust_provider.root().anvil_mine(Some(5), None).await?;
 
     // no ReorgDetected should be emitted
     assert_next!(stream, 6..=6);
@@ -139,12 +130,13 @@ async fn shallow_block_confirmation_does_not_mitigate_reorg() -> anyhow::Result<
     let anvil = Anvil::new().try_spawn()?;
     let provider = ProviderBuilder::new().connect(anvil.ws_endpoint_url().as_str()).await?;
 
-    let client = BlockRangeScanner::new().connect::(provider.root().clone()).run()?;
+    let robust_provider = RobustProvider::new(provider);
+    let client = BlockRangeScanner::new().connect(robust_provider.clone()).run()?;
 
     let mut stream = client.stream_live(3).await?;
 
     // mine initial blocks
-    provider.anvil_mine(Some(10), None).await?;
+    robust_provider.root().anvil_mine(Some(10), None).await?;
 
     // assert initial block ranges immediately to avoid Anvil race condition:
     //
@@ -161,9 +153,9 @@ async fn shallow_block_confirmation_does_not_mitigate_reorg() -> anyhow::Result<
     assert_next!(stream, 7..=7);
 
     // reorg more blocks than the block_confirmation config
-    provider.anvil_reorg(ReorgOptions { depth: 8, tx_block_pairs: vec![] }).await?;
+    robust_provider.root().anvil_reorg(ReorgOptions { depth: 8, tx_block_pairs: vec![] }).await?;
     // mine additional blocks
-    provider.anvil_mine(Some(3), None).await?;
+    robust_provider.root().anvil_mine(Some(3), None).await?;
 
     assert_next!(stream, ScannerStatus::ReorgDetected);
     assert_next!(stream, 0..=0);
diff --git a/tests/sync/from_block.rs b/tests/sync/from_block.rs
index 61068c9f..18b3d50d 100644
--- a/tests/sync/from_block.rs
+++ b/tests/sync/from_block.rs
@@ -86,15 +86,29 @@ async fn block_confirmations_mitigate_reorgs() -> anyhow::Result<()> {
 
     // mine some initial "historic" blocks
     contract.increase().send().await?.watch().await?;
-    provider.anvil_mine(Some(5), None).await?;
+    provider.root().anvil_mine(Some(5), None).await?;
 
     scanner.start().await?;
 
-    // emit initial events
+    // emit "live" events
     for _ in 0..4 {
         contract.increase().send().await?.watch().await?;
     }
 
+    // assert that only the first event has enough confirmations to be streamed
+    assert_next!(stream, &[TestCounter::CountIncreased { newCount: U256::from(1) }]);
+    assert_next!(stream, ScannerStatus::SwitchingToLive);
+    let stream = assert_empty!(stream);
+
+    // Perform a shallow reorg on the live tail
+    // note: we include new txs in the same post-reorg block to showcase that the scanner
+    // only streams the post-reorg, confirmed logs
+    let tx_block_pairs = vec![
+        (TransactionData::JSON(contract.increase().into_transaction_request()), 0),
+        (TransactionData::JSON(contract.increase().into_transaction_request()), 0),
+    ];
+    provider.root().anvil_reorg(ReorgOptions { depth: 2, tx_block_pairs }).await?;
+
     // assert that still no events have been streamed
     let mut stream = assert_empty!(stream);
 
From e9900cf657548ca6c018f102d16f350c8c33661a Mon Sep 17 00:00:00 2001
From: Leo
Date: Wed, 5 Nov 2025 23:11:24 +0900
Subject: [PATCH 099/122] fix: add back timeout

---
 tests/block_range_scanner.rs | 20 ++++++++++----------
 1 file changed, 10 insertions(+), 10 deletions(-)

diff --git a/tests/block_range_scanner.rs b/tests/block_range_scanner.rs
index 1d3cb447..1a3e6a61 100644
--- a/tests/block_range_scanner.rs
+++ b/tests/block_range_scanner.rs
@@ -23,11 +23,11 @@ async fn live_mode_processes_all_blocks_respecting_block_confirmations() -> anyh
 
     robust_provider.root().anvil_mine(Some(5), None).await?;
 
-    assert_next!(stream, 1..=1);
-    assert_next!(stream, 2..=2);
-    assert_next!(stream, 3..=3);
-    assert_next!(stream, 4..=4);
-    assert_next!(stream, 5..=5);
+    assert_next!(stream, 1..=1, timeout = 10);
+    assert_next!(stream, 2..=2, timeout = 10);
+    assert_next!(stream, 3..=3, timeout = 10);
+    assert_next!(stream, 4..=4, timeout = 10);
+    assert_next!(stream, 5..=5, timeout = 10);
     let mut stream = assert_empty!(stream);
 
     robust_provider.root().anvil_mine(Some(1), None).await?;
@@ -41,11 +41,11 @@ async fn live_mode_processes_all_blocks_respecting_block_confirmations() -> anyh
 
     robust_provider.root().anvil_mine(Some(5), None).await?;
 
-    assert_next!(stream, 6..=6);
-    assert_next!(stream, 7..=7);
-    assert_next!(stream, 8..=8);
-    assert_next!(stream, 9..=9);
-    assert_next!(stream, 10..=10);
+    assert_next!(stream, 6..=6, timeout = 10);
+    assert_next!(stream, 7..=7, timeout = 10);
+    assert_next!(stream, 8..=8, timeout = 10);
+    assert_next!(stream, 9..=9, timeout = 10);
+    assert_next!(stream, 10..=10, timeout = 10);
     let mut stream = assert_empty!(stream);
 
     robust_provider.root().anvil_mine(Some(1), None).await?;

From 809b65791e9cf1c5e3e946cc1f63644fc84e7af0 Mon Sep 17 00:00:00 2001
From: Leo
Date: Wed, 5 Nov 2025 21:50:36 +0900
Subject: [PATCH 100/122] feat: better error handling

---
 src/robust_provider.rs | 26 ++++++++++++++------------
 1 file changed, 14 insertions(+), 12 deletions(-)

diff --git a/src/robust_provider.rs b/src/robust_provider.rs
index 023bb21d..34a935ab 100644
--- a/src/robust_provider.rs
+++ b/src/robust_provider.rs
@@ -10,7 +10,7 @@ use alloy::{
 };
 use backon::{ExponentialBuilder, Retryable};
 use thiserror::Error;
-use tokio::time::timeout;
+use tokio::time::{error as TokioError, timeout};
 use tracing::{error, info};
 
 #[derive(Error, Debug, Clone)]
 pub enum Error {
@@ -29,6 +29,12 @@ impl From> for Error {
     }
 }
 
+impl From for Error {
+    fn from(_: TokioError::Elapsed) -> Self {
+        Error::Timeout
+    }
+}
+
 /// Provider wrapper with built-in retry and timeout mechanisms.
 ///
 /// This wrapper around Alloy providers automatically handles retries,
@@ -44,9 +50,9 @@ pub struct RobustProvider {
 
 // RPC retry and timeout settings
 /// Default timeout used by `RobustProvider`
-pub const DEFAULT_MAX_TIMEOUT: Duration = Duration::from_secs(30);
+pub const DEFAULT_MAX_TIMEOUT: Duration = Duration::from_secs(60);
 /// Default maximum number of retry attempts.
-pub const DEFAULT_MAX_RETRIES: usize = 5;
+pub const DEFAULT_MAX_RETRIES: usize = 3;
 /// Default base delay between retries.
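As a rough sanity check on the new defaults (assuming backon's `ExponentialBuilder` doubles the delay on each attempt and jitter is off; an illustration, not crate code), three retries from a 1 s base sleep for about 1 + 2 + 4 = 7 seconds, comfortably inside the 60 s cap:

```rust
// Illustrative only: worst-case sleep budget for a doubling backoff
// that starts at `min_delay_secs` and performs `max_retries` retries.
fn worst_case_backoff_secs(max_retries: u32, min_delay_secs: u64) -> u64 {
    (0..max_retries).map(|attempt| min_delay_secs << attempt).sum()
}

fn main() {
    // New defaults: 3 retries, 1 s base => 1 + 2 + 4 = 7 seconds of sleep.
    assert_eq!(worst_case_backoff_secs(3, 1), 7);
}
```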
 pub const DEFAULT_RETRY_INTERVAL: Duration = Duration::from_secs(1);
@@ -247,10 +253,8 @@ impl RobustProvider {
             }
             Err(e) => {
                 last_error = Some(e);
-                if idx == 0 {
-                    if self.providers.len() > 1 {
-                        info!("Primary provider failed, trying fallback provider(s)");
-                    }
+                if idx == 0 && self.providers.len() > 1 {
+                    info!("Primary provider failed, trying fallback provider(s)");
                 } else {
                     error!(provider_num = idx, err = %last_error.as_ref().unwrap(), "Fallback provider failed with error");
                 }
@@ -277,7 +281,7 @@ impl RobustProvider {
             .with_max_times(self.max_retries)
             .with_min_delay(self.retry_interval);
 
-        match timeout(
+        timeout(
             self.max_timeout,
             (|| operation(provider.clone()))
                 .retry(retry_strategy)
@@ -287,10 +291,8 @@ impl RobustProvider {
                 .sleep(tokio::time::sleep),
         )
         .await
-        {
-            Ok(res) => res.map_err(Error::from),
-            Err(_) => Err(Error::Timeout),
-        }
+        .map_err(Error::from)?
+        .map_err(Error::from)
     }
 }
 
From e4d5d625c89d1319661e74fa92d332a8df7250c5 Mon Sep 17 00:00:00 2001
From: Leo
Date: Thu, 6 Nov 2025 16:58:35 +0900
Subject: [PATCH 101/122] feat: remove retry error

---
 src/error.rs           | 7 +------
 src/robust_provider.rs | 6 +++---
 2 files changed, 4 insertions(+), 9 deletions(-)

diff --git a/src/error.rs b/src/error.rs
index cc67dc13..6914e8bd 100644
--- a/src/error.rs
+++ b/src/error.rs
@@ -10,8 +10,6 @@ use crate::robust_provider::Error as RobustProviderError;
 
 #[derive(Error, Debug, Clone)]
 pub enum ScannerError {
-    // #[error("WebSocket error: {0}")]
-    // WebSocketError(#[from] tokio_tungstenite::tungstenite::Error),
     #[error("Serialization error: {0}")]
     SerializationError(Arc),
 
@@ -35,16 +33,13 @@ pub enum ScannerError {
 
     #[error("Operation timed out")]
     Timeout,
-
-    #[error("RPC call failed after exhausting all retry attempts: {0}")]
-    RetryFailure(Arc>),
 }
 
 impl From for ScannerError {
     fn from(error: RobustProviderError) -> ScannerError {
         match error {
             RobustProviderError::Timeout => ScannerError::Timeout,
-            RobustProviderError::RetryFailure(err) => ScannerError::RetryFailure(err),
+            RobustProviderError::RpcError(err) => ScannerError::RpcError(err),
             RobustProviderError::BlockNotFound(block) => ScannerError::BlockNotFound(block),
         }
     }
diff --git a/src/robust_provider.rs b/src/robust_provider.rs
index 34a935ab..305c374e 100644
--- a/src/robust_provider.rs
+++ b/src/robust_provider.rs
@@ -18,14 +18,14 @@ pub enum Error {
     #[error("Operation timed out")]
     Timeout,
     #[error("RPC call failed after exhausting all retry attempts: {0}")]
-    RetryFailure(Arc>),
+    RpcError(Arc>),
     #[error("Block not found, Block Id: {0}")]
     BlockNotFound(BlockId),
 }
 
 impl From> for Error {
     fn from(err: RpcError) -> Self {
-        Error::RetryFailure(Arc::new(err))
+        Error::RpcError(Arc::new(err))
     }
 }
 
@@ -366,7 +366,7 @@ mod tests {
             })
             .await;
 
-        assert!(matches!(result, Err(Error::RetryFailure(_))));
+        assert!(matches!(result, Err(Error::RpcError(_))));
         assert_eq!(call_count.load(Ordering::SeqCst), 3);
     }
 
From 297fc354ac181fcb018d15c92ee0fbf0718587b6 Mon Sep 17 00:00:00 2001
From: Leo
Date: Thu, 6 Nov 2025 19:57:35 +0900
Subject: [PATCH 102/122] feat: retry_interval to min_delay

---
 src/robust_provider.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/robust_provider.rs b/src/robust_provider.rs
index 305c374e..8c7e2d13 100644
--- a/src/robust_provider.rs
+++ b/src/robust_provider.rs
@@ -82,7 +82,7 @@ impl RobustProvider {
     }
 
     #[must_use]
-    pub fn retry_interval(mut self, retry_interval: Duration) -> Self {
+    pub fn min_delay(mut self, retry_interval: Duration) -> Self {
         self.retry_interval = retry_interval;
self } From b93bd28ec03d889e5aa08acf06dcf630d26893e4 Mon Sep 17 00:00:00 2001 From: Leo Date: Thu, 6 Nov 2025 19:58:30 +0900 Subject: [PATCH 103/122] feat: rename retry interval in other places --- src/robust_provider.rs | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/src/robust_provider.rs b/src/robust_provider.rs index 8c7e2d13..07568071 100644 --- a/src/robust_provider.rs +++ b/src/robust_provider.rs @@ -45,7 +45,7 @@ pub struct RobustProvider { providers: Vec>, max_timeout: Duration, max_retries: usize, - retry_interval: Duration, + min_delay: Duration, } // RPC retry and timeout settings @@ -54,7 +54,7 @@ pub const DEFAULT_MAX_TIMEOUT: Duration = Duration::from_secs(60); /// Default maximum number of retry attempts. pub const DEFAULT_MAX_RETRIES: usize = 3; /// Default base delay between retries. -pub const DEFAULT_RETRY_INTERVAL: Duration = Duration::from_secs(1); +pub const DEFAULT_MIN_DELAY: Duration = Duration::from_secs(1); impl RobustProvider { /// Create a new `RobustProvider` with default settings. @@ -65,7 +65,7 @@ impl RobustProvider { providers: vec![provider.root().to_owned()], max_timeout: DEFAULT_MAX_TIMEOUT, max_retries: DEFAULT_MAX_RETRIES, - retry_interval: DEFAULT_RETRY_INTERVAL, + min_delay: DEFAULT_MIN_DELAY, } } @@ -83,7 +83,7 @@ impl RobustProvider { #[must_use] pub fn min_delay(mut self, retry_interval: Duration) -> Self { - self.retry_interval = retry_interval; + self.min_delay = retry_interval; self } @@ -279,7 +279,7 @@ impl RobustProvider { { let retry_strategy = ExponentialBuilder::default() .with_max_times(self.max_retries) - .with_min_delay(self.retry_interval); + .with_min_delay(self.min_delay); timeout( self.max_timeout, @@ -312,7 +312,7 @@ mod tests { providers: vec![RootProvider::new_http("http://localhost:8545".parse().unwrap())], max_timeout: Duration::from_millis(timeout), max_retries, - retry_interval: Duration::from_millis(retry_interval), + min_delay: Duration::from_millis(retry_interval), } } From 635c02a0e3946dbb1225a1d92a11455dbb045960 Mon Sep 17 00:00:00 2001 From: Leo Date: Thu, 6 Nov 2025 20:00:11 +0900 Subject: [PATCH 104/122] feat: doc comments --- src/robust_provider.rs | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/robust_provider.rs b/src/robust_provider.rs index 07568071..a803a2a3 100644 --- a/src/robust_provider.rs +++ b/src/robust_provider.rs @@ -69,18 +69,21 @@ impl RobustProvider { } } + /// Set the maximum timeout for RPC operations. #[must_use] pub fn max_timeout(mut self, timeout: Duration) -> Self { self.max_timeout = timeout; self } + /// Set the maximum number of retry attempts. #[must_use] pub fn max_retries(mut self, max_retries: usize) -> Self { self.max_retries = max_retries; self } + /// Set the base delay for exponential backoff retries. 
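A quick before/after for callers of the renamed setter (values and URL are illustrative; no connection is opened until a call is issued):

```rust
use alloy::{network::Ethereum, providers::RootProvider};
use event_scanner::robust_provider::RobustProvider;
use std::time::Duration;

fn configured() -> RobustProvider<Ethereum> {
    let primary =
        RootProvider::<Ethereum>::new_http("http://localhost:8545".parse().unwrap());
    RobustProvider::new(primary)
        .max_retries(3)
        .min_delay(Duration::from_millis(500)) // was `retry_interval(...)`
}
```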
#[must_use] pub fn min_delay(mut self, retry_interval: Duration) -> Self { self.min_delay = retry_interval; From d1aaa04867564708a2553a6015642c72babf3aea Mon Sep 17 00:00:00 2001 From: Leo Date: Thu, 6 Nov 2025 20:06:01 +0900 Subject: [PATCH 105/122] fix: import and provider issues in test --- tests/common/mod.rs | 4 ++-- tests/common/test_counter.rs | 41 +----------------------------------- tests/latest_events/basic.rs | 7 ++---- 3 files changed, 5 insertions(+), 47 deletions(-) diff --git a/tests/common/mod.rs b/tests/common/mod.rs index b8178c1c..e687d150 100644 --- a/tests/common/mod.rs +++ b/tests/common/mod.rs @@ -6,10 +6,10 @@ pub mod setup_scanner; pub mod test_counter; pub(crate) use setup_scanner::{ - LiveScannerSetup, setup_common, setup_historic_scanner, setup_latest_scanner, + LiveScannerSetup, SyncScannerSetup, setup_common, setup_historic_scanner, setup_latest_scanner, setup_live_scanner, setup_sync_from_latest_scanner, setup_sync_scanner, }; -pub(crate) use test_counter::{TestCounter, TestCounterExt, deploy_counter}; +pub(crate) use test_counter::{TestCounter, deploy_counter}; use alloy::{network::Ethereum, providers::ProviderBuilder}; use alloy_node_bindings::{Anvil, AnvilInstance}; diff --git a/tests/common/test_counter.rs b/tests/common/test_counter.rs index 217338d0..c1f1dee2 100644 --- a/tests/common/test_counter.rs +++ b/tests/common/test_counter.rs @@ -1,5 +1,4 @@ -use alloy::{network::Ethereum, primitives::U256, providers::Provider, sol}; -use event_scanner::test_utils::LogMetadata; +use alloy::{network::Ethereum, sol}; // Shared test contract used across integration tests sol! { @@ -37,41 +36,3 @@ where let contract = TestCounter::deploy(provider).await?; Ok(contract) } - -#[allow(dead_code)] -pub(crate) trait TestCounterExt { - async fn increase_and_get_meta( - &self, - ) -> anyhow::Result>; - async fn decrease_and_get_meta( - &self, - ) -> anyhow::Result>; -} - -impl TestCounterExt for TestCounter::TestCounterInstance

{ - async fn increase_and_get_meta( - &self, - ) -> anyhow::Result> { - let receipt = self.increase().send().await?.get_receipt().await?; - let tx_hash = receipt.transaction_hash; - let new_count = receipt.decoded_log::().unwrap().data.newCount; - Ok(LogMetadata { - event: TestCounter::CountIncreased { newCount: U256::from(new_count) }, - address: *self.address(), - tx_hash, - }) - } - - async fn decrease_and_get_meta( - &self, - ) -> anyhow::Result> { - let receipt = self.decrease().send().await?.get_receipt().await?; - let tx_hash = receipt.transaction_hash; - let new_count = receipt.decoded_log::().unwrap().data.newCount; - Ok(LogMetadata { - event: TestCounter::CountDecreased { newCount: U256::from(new_count) }, - address: *self.address(), - tx_hash, - }) - } -} diff --git a/tests/latest_events/basic.rs b/tests/latest_events/basic.rs index 7d1c97bc..95b4df96 100644 --- a/tests/latest_events/basic.rs +++ b/tests/latest_events/basic.rs @@ -1,8 +1,5 @@ use alloy::{ - eips::BlockNumberOrTag, - primitives::U256, - providers::{Provider, ext::AnvilApi}, - sol_types::SolEvent, + eips::BlockNumberOrTag, primitives::U256, providers::ext::AnvilApi, sol_types::SolEvent, }; use crate::common::{TestCounter, deploy_counter, setup_common, setup_latest_scanner}; @@ -341,7 +338,7 @@ async fn latest_scanner_large_gaps_and_empty_ranges() -> anyhow::Result<()> { #[tokio::test] async fn latest_scanner_boundary_range_single_block() -> anyhow::Result<()> { - let (anvil, _provider, contract, default_filter) = setup_common(None, None).await?; + let (_anvil, provider, contract, default_filter) = setup_common(None, None).await?; contract.increase().send().await?.watch().await?; let receipt = contract.increase().send().await?.get_receipt().await?; From 3274332964bc53fac6642884d64c00ce172ce022 Mon Sep 17 00:00:00 2001 From: Leo Date: Thu, 6 Nov 2025 20:08:17 +0900 Subject: [PATCH 106/122] fix: test --- tests/sync/from_block.rs | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/tests/sync/from_block.rs b/tests/sync/from_block.rs index 65f4c90f..b5836f33 100644 --- a/tests/sync/from_block.rs +++ b/tests/sync/from_block.rs @@ -82,8 +82,9 @@ async fn block_confirmations_mitigate_reorgs() -> anyhow::Result<()> { setup_sync_scanner(None, None, BlockNumberOrTag::Earliest, 5).await?; // mine some initial "historic" blocks - contract.increase().send().await?.watch().await?; - provider.root().anvil_mine(Some(5), None).await?; + for _ in 0..7 { + contract.increase().send().await?.watch().await?; + } scanner.start().await?; From b620c3bddba4ad1bf7180bfcb6fd6dce3db891ca Mon Sep 17 00:00:00 2001 From: Leo Date: Thu, 6 Nov 2025 20:14:16 +0900 Subject: [PATCH 107/122] ref: revert changes to block range test --- tests/block_range_scanner.rs | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/tests/block_range_scanner.rs b/tests/block_range_scanner.rs index 1a3e6a61..1e30c5ac 100644 --- a/tests/block_range_scanner.rs +++ b/tests/block_range_scanner.rs @@ -12,7 +12,7 @@ use event_scanner::{ #[tokio::test] async fn live_mode_processes_all_blocks_respecting_block_confirmations() -> anyhow::Result<()> { let anvil = Anvil::new().try_spawn()?; - let provider = ProviderBuilder::new().connect(anvil.ws_endpoint().as_str()).await?; + let provider = ProviderBuilder::new().connect(anvil.ws_endpoint_url().as_str()).await?; let robust_provider = RobustProvider::new(provider); // --- Zero block confirmations -> stream immediately --- @@ -23,11 +23,11 @@ async fn 
live_mode_processes_all_blocks_respecting_block_confirmations() -> anyh robust_provider.root().anvil_mine(Some(5), None).await?; - assert_next!(stream, 1..=1, timeout = 10); - assert_next!(stream, 2..=2, timeout = 10); - assert_next!(stream, 3..=3, timeout = 10); - assert_next!(stream, 4..=4, timeout = 10); - assert_next!(stream, 5..=5, timeout = 10); + assert_next!(stream, 1..=1); + assert_next!(stream, 2..=2); + assert_next!(stream, 3..=3); + assert_next!(stream, 4..=4); + assert_next!(stream, 5..=5); let mut stream = assert_empty!(stream); robust_provider.root().anvil_mine(Some(1), None).await?; @@ -41,11 +41,11 @@ async fn live_mode_processes_all_blocks_respecting_block_confirmations() -> anyh robust_provider.root().anvil_mine(Some(5), None).await?; - assert_next!(stream, 6..=6, timeout = 10); - assert_next!(stream, 7..=7, timeout = 10); - assert_next!(stream, 8..=8, timeout = 10); - assert_next!(stream, 9..=9, timeout = 10); - assert_next!(stream, 10..=10, timeout = 10); + assert_next!(stream, 6..=6); + assert_next!(stream, 7..=7); + assert_next!(stream, 8..=8); + assert_next!(stream, 9..=9); + assert_next!(stream, 10..=10); let mut stream = assert_empty!(stream); robust_provider.root().anvil_mine(Some(1), None).await?; From 13d7529570d6ab114f3daf05c96baeb0d61c1a59 Mon Sep 17 00:00:00 2001 From: Leo Date: Thu, 6 Nov 2025 22:04:50 +0900 Subject: [PATCH 108/122] ref: retry_with_total_timeout --- src/robust_provider.rs | 49 +++++++++++++++++++++--------------------- 1 file changed, 25 insertions(+), 24 deletions(-) diff --git a/src/robust_provider.rs b/src/robust_provider.rs index a803a2a3..be5c1938 100644 --- a/src/robust_provider.rs +++ b/src/robust_provider.rs @@ -1,4 +1,4 @@ -use std::{future::Future, sync::Arc, time::Duration}; +use std::{fmt::Debug, future::Future, sync::Arc, time::Duration}; use alloy::{ eips::{BlockId, BlockNumberOrTag}, @@ -230,44 +230,45 @@ impl RobustProvider { /// and all fallback providers failed" if the overall timeout elapses and no fallback /// providers succeed. /// - Propagates any [`RpcError`] from the underlying retries. 
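The refactor below collapses the duplicated primary/fallback paths into a single pass over `providers`. Stripped of async, retries, and timeouts, the control flow it implements is essentially this (a sketch, not the crate's code):

```rust
// Try the primary first, then each fallback in order; if every provider
// fails, surface the error from the last attempt.
fn failover<P, T, E>(providers: &[P], op: impl Fn(&P) -> Result<T, E>) -> Result<T, E> {
    let mut iter = providers.iter();
    let primary = iter.next().expect("at least one provider");
    let mut last_err = match op(primary) {
        Ok(value) => return Ok(value),
        Err(e) => e,
    };
    for fallback in iter {
        match op(fallback) {
            Ok(value) => return Ok(value),
            Err(e) => last_err = e,
        }
    }
    Err(last_err)
}
```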
- async fn retry_with_total_timeout(&self, operation: F) -> Result + async fn retry_with_total_timeout(&self, operation: F) -> Result where F: Fn(RootProvider) -> Fut, Fut: Future>>, { - let mut last_error = None; - - // Try each provider in sequence (first one is primary) - for (idx, provider) in self.providers.iter().enumerate() { - if idx == 0 { - info!("Attempting primary provider"); - } else { - info!("Attempting fallback provider {} out of {}", idx, self.providers.len() - 1); - } + let mut providers = self.providers.iter(); + let primary = providers.next().expect("should have primary provider"); + + let result = self.try_provider_with_timeout(primary, &operation).await; + + if result.is_ok() { + return result; + } + + let mut last_error = result.unwrap_err(); + + if self.providers.len() > 1 { + info!("Primary provider failed, trying fallback provider(s)"); + } - let result = self.try_provider_with_timeout(provider, &operation).await; + // This loop starts at index 1 automatically + for (idx, provider) in providers.enumerate() { + let fallback_num = idx + 1; + info!("Attempting fallback provider {}/{}", fallback_num, self.providers.len() - 1); - match result { + match self.try_provider_with_timeout(provider, &operation).await { Ok(value) => { - if idx > 0 { - info!(provider_num = idx, "Fallback provider succeeded"); - } + info!(provider_num = fallback_num, "Fallback provider succeeded"); return Ok(value); } Err(e) => { - last_error = Some(e); - if idx == 0 && self.providers.len() > 1 { - info!("Primary provider failed, trying fallback provider(s)"); - } else { - error!(provider_num = idx, err = %last_error.as_ref().unwrap(), "Fallback provider failed with error"); - } + error!(provider_num = fallback_num, err = %e, "Fallback provider failed"); + last_error = e; } } } error!("All providers failed or timed out"); - // Return the last error encountered - Err(last_error.unwrap_or(Error::Timeout)) + Err(last_error) } /// Try executing an operation with a specific provider with retry and timeout. From 655124ac842dddf5537cad08f0b8df455280510a Mon Sep 17 00:00:00 2001 From: Leo Date: Fri, 7 Nov 2025 17:00:19 +0900 Subject: [PATCH 109/122] Pubsub checks (#153) Co-authored-by: 0xNeshi --- src/robust_provider.rs | 207 ++++++++++++++++++++++++++++------- tests/block_range_scanner.rs | 4 +- 2 files changed, 169 insertions(+), 42 deletions(-) diff --git a/src/robust_provider.rs b/src/robust_provider.rs index be5c1938..eb277e80 100644 --- a/src/robust_provider.rs +++ b/src/robust_provider.rs @@ -85,8 +85,8 @@ impl RobustProvider { /// Set the base delay for exponential backoff retries. #[must_use] - pub fn min_delay(mut self, retry_interval: Duration) -> Self { - self.min_delay = retry_interval; + pub fn min_delay(mut self, min_delay: Duration) -> Self { + self.min_delay = min_delay; self } @@ -105,8 +105,8 @@ impl RobustProvider { /// /// Fallback providers are used when the primary provider times out or fails. 
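With `fallback` now accepting any `impl Provider`, transports can be mixed. Under this patch the expectation is that plain RPC calls may fail over to an HTTP provider, while `subscribe_blocks` skips fallbacks that lack a pubsub frontend (endpoints are placeholders; a sketch rather than a tested snippet):

```rust
use alloy::providers::{ProviderBuilder, WsConnect};
use event_scanner::robust_provider::RobustProvider;

async fn mixed_transports() -> anyhow::Result<()> {
    let ws = ProviderBuilder::new()
        .connect_ws(WsConnect::new("wss://node.example"))
        .await?;
    let http = ProviderBuilder::new().connect_http("https://node.example".parse()?);

    let robust = RobustProvider::new(ws).fallback(http);

    // Served by the primary, with the HTTP provider as a fallback.
    let _head = robust.get_block_number().await?;
    // Subscriptions ignore the HTTP fallback (no pubsub frontend).
    let _sub = robust.subscribe_blocks().await?;
    Ok(())
}
```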
#[must_use] - pub fn fallback(mut self, provider: RootProvider) -> Self { - self.providers.push(provider); + pub fn fallback(mut self, provider: impl Provider) -> Self { + self.providers.push(provider.root().to_owned()); self } @@ -122,9 +122,10 @@ impl RobustProvider { ) -> Result { info!("eth_getBlockByNumber called"); let result = self - .retry_with_total_timeout(move |provider| async move { - provider.get_block_by_number(number).await - }) + .retry_with_total_timeout( + move |provider| async move { provider.get_block_by_number(number).await }, + false, + ) .await; if let Err(e) = &result { error!(error = %e, "eth_getByBlockNumber failed"); @@ -144,6 +145,7 @@ impl RobustProvider { let result = self .retry_with_total_timeout( move |provider| async move { provider.get_block_number().await }, + false, ) .await; if let Err(e) = &result { @@ -164,9 +166,10 @@ impl RobustProvider { ) -> Result { info!("eth_getBlockByHash called"); let result = self - .retry_with_total_timeout(move |provider| async move { - provider.get_block_by_hash(hash).await - }) + .retry_with_total_timeout( + move |provider| async move { provider.get_block_by_hash(hash).await }, + false, + ) .await; if let Err(e) = &result { error!(error = %e, "eth_getBlockByHash failed"); @@ -186,6 +189,7 @@ impl RobustProvider { let result = self .retry_with_total_timeout( move |provider| async move { provider.get_logs(filter).await }, + false, ) .await; if let Err(e) = &result { @@ -202,11 +206,12 @@ impl RobustProvider { /// after exhausting retries or if the call times out. pub async fn subscribe_blocks(&self) -> Result, Error> { info!("eth_subscribe called"); - // We need this otherwise error is not clear + // immediately fail if primary does not support pubsub self.root().client().expect_pubsub_frontend(); let result = self .retry_with_total_timeout( move |provider| async move { provider.subscribe_blocks().await }, + true, ) .await; if let Err(e) = &result { @@ -224,17 +229,27 @@ impl RobustProvider { /// If the timeout is exceeded and fallback providers are available, it will /// attempt to use each fallback provider in sequence. /// + /// If `require_pubsub` is true, providers that don't support pubsub will be skipped. + /// /// # Errors /// /// - Returns [`RpcError`] with message "total operation timeout exceeded /// and all fallback providers failed" if the overall timeout elapses and no fallback /// providers succeed. + /// - Returns [`RpcError::Transport(TransportErrorKind::PubsubUnavailable)`] if `require_pubsub` + /// is true and all providers don't support pubsub. /// - Propagates any [`RpcError`] from the underlying retries. 
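One subtlety of the signature below: `max_timeout` bounds each provider's whole retry loop rather than each attempt, so with two providers a caller can wait up to roughly twice the configured budget. In miniature (standalone sketch, not the crate's code):

```rust
use std::{future::Future, time::Duration};
use tokio::time::timeout;

// The budget wraps the entire retry loop for one provider; a second
// provider would get a fresh budget of its own.
async fn with_budget<T, E, F, Fut>(
    budget: Duration,
    attempts: u32,
    op: F,
) -> Result<T, &'static str>
where
    F: Fn() -> Fut,
    Fut: Future<Output = Result<T, E>>,
{
    timeout(budget, async {
        for _ in 0..attempts {
            if let Ok(value) = op().await {
                return Ok(value);
            }
        }
        Err("all attempts failed")
    })
    .await
    .map_err(|_| "budget exceeded")?
}
```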
- async fn retry_with_total_timeout(&self, operation: F) -> Result + async fn retry_with_total_timeout( + &self, + operation: F, + require_pubsub: bool, + ) -> Result where F: Fn(RootProvider) -> Fut, Fut: Future>>, { + let mut skipped_count = 0; + let mut providers = self.providers.iter(); let primary = providers.next().expect("should have primary provider"); @@ -253,6 +268,11 @@ impl RobustProvider { // This loop starts at index 1 automatically for (idx, provider) in providers.enumerate() { let fallback_num = idx + 1; + if require_pubsub && !Self::supports_pubsub(provider) { + info!("Fallback provider {} doesn't support pubsub, skipping", fallback_num); + skipped_count += 1; + continue; + } info!("Attempting fallback provider {}/{}", fallback_num, self.providers.len() - 1); match self.try_provider_with_timeout(provider, &operation).await { @@ -267,6 +287,13 @@ impl RobustProvider { } } + // If all providers were skipped due to pubsub requirement + if skipped_count == self.providers.len() { + error!("All providers skipped - none support pubsub"); + return Err(RpcError::Transport(TransportErrorKind::PubsubUnavailable).into()); + } + + // Return the last error encountered error!("All providers failed or timed out"); Err(last_error) } @@ -298,25 +325,30 @@ impl RobustProvider { .map_err(Error::from)? .map_err(Error::from) } + + /// Check if a provider supports pubsub + fn supports_pubsub(provider: &RootProvider) -> bool { + provider.client().pubsub_frontend().is_some() + } } #[cfg(test)] mod tests { use super::*; - use alloy::network::Ethereum; + use alloy::{ + network::Ethereum, + providers::{ProviderBuilder, WsConnect}, + }; + use alloy_node_bindings::Anvil; use std::sync::atomic::{AtomicUsize, Ordering}; use tokio::time::sleep; - fn test_provider( - timeout: u64, - max_retries: usize, - retry_interval: u64, - ) -> RobustProvider { + fn test_provider(timeout: u64, max_retries: usize, min_delay: u64) -> RobustProvider { RobustProvider { providers: vec![RootProvider::new_http("http://localhost:8545".parse().unwrap())], max_timeout: Duration::from_millis(timeout), max_retries, - min_delay: Duration::from_millis(retry_interval), + min_delay: Duration::from_millis(min_delay), } } @@ -327,11 +359,14 @@ mod tests { let call_count = AtomicUsize::new(0); let result = provider - .retry_with_total_timeout(|_| async { - call_count.fetch_add(1, Ordering::SeqCst); - let count = call_count.load(Ordering::SeqCst); - Ok(count) - }) + .retry_with_total_timeout( + |_| async { + call_count.fetch_add(1, Ordering::SeqCst); + let count = call_count.load(Ordering::SeqCst); + Ok(count) + }, + false, + ) .await; assert!(matches!(result, Ok(1))); @@ -344,14 +379,17 @@ mod tests { let call_count = AtomicUsize::new(0); let result = provider - .retry_with_total_timeout(|_| async { - call_count.fetch_add(1, Ordering::SeqCst); - let count = call_count.load(Ordering::SeqCst); - match count { - 3 => Ok(count), - _ => Err(TransportErrorKind::BackendGone.into()), - } - }) + .retry_with_total_timeout( + |_| async { + call_count.fetch_add(1, Ordering::SeqCst); + let count = call_count.load(Ordering::SeqCst); + match count { + 3 => Ok(count), + _ => Err(TransportErrorKind::BackendGone.into()), + } + }, + false, + ) .await; assert!(matches!(result, Ok(3))); @@ -364,10 +402,13 @@ mod tests { let call_count = AtomicUsize::new(0); let result: Result<(), Error> = provider - .retry_with_total_timeout(|_| async { - call_count.fetch_add(1, Ordering::SeqCst); - Err(TransportErrorKind::BackendGone.into()) - }) + 
.retry_with_total_timeout( + |_| async { + call_count.fetch_add(1, Ordering::SeqCst); + Err(TransportErrorKind::BackendGone.into()) + }, + false, + ) .await; assert!(matches!(result, Err(Error::RpcError(_)))); @@ -380,12 +421,98 @@ mod tests { let provider = test_provider(max_timeout, 10, 1); let result = provider - .retry_with_total_timeout(move |_provider| async move { - sleep(Duration::from_millis(max_timeout + 10)).await; - Ok(42) - }) + .retry_with_total_timeout( + move |_provider| async move { + sleep(Duration::from_millis(max_timeout + 10)).await; + Ok(42) + }, + false, + ) .await; assert!(matches!(result, Err(Error::Timeout))); } + + #[tokio::test] + async fn test_subscribe_fails_causes_backup_to_be_used() { + let anvil_1 = Anvil::new().port(2222_u16).try_spawn().expect("Failed to start anvil"); + + let ws_provider_1 = ProviderBuilder::new() + .connect_ws(WsConnect::new(anvil_1.ws_endpoint_url().as_str())) + .await + .expect("Failed to connect to WS"); + + let anvil_2 = Anvil::new().port(1111_u16).try_spawn().expect("Failed to start anvil"); + + let ws_provider_2 = ProviderBuilder::new() + .connect_ws(WsConnect::new(anvil_2.ws_endpoint_url().as_str())) + .await + .expect("Failed to connect to WS"); + + let robust = RobustProvider::new(ws_provider_1) + .fallback(ws_provider_2) + .max_timeout(Duration::from_secs(5)) + .max_retries(10) + .min_delay(Duration::from_millis(100)); + + drop(anvil_1); + + let result = robust.subscribe_blocks().await; + + assert!(result.is_ok(), "Expected subscribe blocks to work"); + } + + #[tokio::test] + #[should_panic(expected = "called pubsub_frontend on a non-pubsub transport")] + async fn test_subscribe_fails_if_primary_provider_lacks_pubsub() { + let anvil = Anvil::new().try_spawn().expect("Failed to start anvil"); + + let http_provider = ProviderBuilder::new().connect_http(anvil.endpoint_url()); + let ws_provider = ProviderBuilder::new() + .connect_ws(WsConnect::new(anvil.ws_endpoint_url().as_str())) + .await + .expect("Failed to connect to WS"); + + let robust = RobustProvider::new(http_provider) + .fallback(ws_provider) + .max_timeout(Duration::from_secs(5)) + .max_retries(10) + .min_delay(Duration::from_millis(100)); + + let _ = robust.subscribe_blocks().await; + } + + #[tokio::test] + async fn test_ws_fails_http_fallback_returns_primary_error() { + let anvil_1 = Anvil::new().try_spawn().expect("Failed to start anvil"); + + let ws_provider = ProviderBuilder::new() + .connect_ws(WsConnect::new(anvil_1.ws_endpoint_url().as_str())) + .await + .expect("Failed to connect to WS"); + + let anvil_2 = Anvil::new().port(8222_u16).try_spawn().expect("Failed to start anvil"); + let http_provider = ProviderBuilder::new().connect_http(anvil_2.endpoint_url()); + + let robust = RobustProvider::new(ws_provider.clone()) + .fallback(http_provider) + .max_timeout(Duration::from_millis(500)) + .max_retries(0) + .min_delay(Duration::from_millis(10)); + + // force ws_provider to fail and return BackendGone + drop(anvil_1); + + let err = robust.subscribe_blocks().await.unwrap_err(); + + // The error should be either a Timeout or BackendGone from the primary WS provider, + // NOT a PubsubUnavailable error (which would indicate HTTP fallback was attempted) + match err { + Error::Timeout => {} + Error::RpcError(e) => { + assert!(matches!(e.as_ref(), RpcError::Transport(TransportErrorKind::BackendGone))); + } + Error::BlockNotFound(id) => panic!("Unexpected error type: BlockNotFound({id})"), + } + } } diff --git a/tests/block_range_scanner.rs b/tests/block_range_scanner.rs 
index 1e30c5ac..5e8c393d 100644 --- a/tests/block_range_scanner.rs +++ b/tests/block_range_scanner.rs @@ -32,7 +32,7 @@ async fn live_mode_processes_all_blocks_respecting_block_confirmations() -> anyh robust_provider.root().anvil_mine(Some(1), None).await?; - assert_next!(stream, 6..=6); + assert_next!(stream, 6..=6, timeout = 10); assert_empty!(stream); // --- 1 block confirmation --- @@ -50,7 +50,7 @@ async fn live_mode_processes_all_blocks_respecting_block_confirmations() -> anyh robust_provider.root().anvil_mine(Some(1), None).await?; - assert_next!(stream, 11..=11); + assert_next!(stream, 11..=11, timeout = 10); assert_empty!(stream); Ok(()) From 4873a137b95bd1f760c36c5e2a03bf5a4277edc6 Mon Sep 17 00:00:00 2001 From: Leo Date: Fri, 7 Nov 2025 18:52:30 +0900 Subject: [PATCH 110/122] ref: root --> primary provider --- src/robust_provider.rs | 4 ++-- tests/block_range_scanner.rs | 34 ++++++++++++++++++++-------------- tests/common/setup_scanner.rs | 2 +- tests/latest_events/basic.rs | 6 +++--- tests/live/basic.rs | 2 +- tests/live/reorg.rs | 14 +++++++------- tests/sync/from_block.rs | 4 ++-- tests/sync/from_latest.rs | 4 ++-- 8 files changed, 38 insertions(+), 32 deletions(-) diff --git a/src/robust_provider.rs b/src/robust_provider.rs index eb277e80..fc7791ea 100644 --- a/src/robust_provider.rs +++ b/src/robust_provider.rs @@ -96,7 +96,7 @@ impl RobustProvider { /// /// If there are no providers set (this should never happen) #[must_use] - pub fn root(&self) -> &RootProvider { + pub fn primary(&self) -> &RootProvider { // Safe to unwrap because we always have at least one provider self.providers.first().expect("providers vector should never be empty") } @@ -207,7 +207,7 @@ impl RobustProvider { pub async fn subscribe_blocks(&self) -> Result, Error> { info!("eth_subscribe called"); // immediately fail if primary does not support pubsub - self.root().client().expect_pubsub_frontend(); + self.primary().client().expect_pubsub_frontend(); let result = self .retry_with_total_timeout( move |provider| async move { provider.subscribe_blocks().await }, diff --git a/tests/block_range_scanner.rs b/tests/block_range_scanner.rs index 5e8c393d..78bb3113 100644 --- a/tests/block_range_scanner.rs +++ b/tests/block_range_scanner.rs @@ -21,7 +21,7 @@ async fn live_mode_processes_all_blocks_respecting_block_confirmations() -> anyh let mut stream = client.stream_live(0).await?; - robust_provider.root().anvil_mine(Some(5), None).await?; + robust_provider.primary().anvil_mine(Some(5), None).await?; assert_next!(stream, 1..=1); assert_next!(stream, 2..=2); @@ -30,7 +30,7 @@ async fn live_mode_processes_all_blocks_respecting_block_confirmations() -> anyh assert_next!(stream, 5..=5); let mut stream = assert_empty!(stream); - robust_provider.root().anvil_mine(Some(1), None).await?; + robust_provider.primary().anvil_mine(Some(1), None).await?; assert_next!(stream, 6..=6, timeout = 10); assert_empty!(stream); @@ -39,7 +39,7 @@ async fn live_mode_processes_all_blocks_respecting_block_confirmations() -> anyh let mut stream = client.stream_live(1).await?; - robust_provider.root().anvil_mine(Some(5), None).await?; + robust_provider.primary().anvil_mine(Some(5), None).await?; assert_next!(stream, 6..=6); assert_next!(stream, 7..=7); @@ -48,7 +48,7 @@ async fn live_mode_processes_all_blocks_respecting_block_confirmations() -> anyh assert_next!(stream, 10..=10); let mut stream = assert_empty!(stream); - robust_provider.root().anvil_mine(Some(1), None).await?; + robust_provider.primary().anvil_mine(Some(1), None).await?; 
assert_next!(stream, 11..=11, timeout = 10); assert_empty!(stream); @@ -64,20 +64,20 @@ async fn stream_from_latest_starts_at_tip_not_confirmed() -> anyhow::Result<()> let robust_provider = RobustProvider::new(provider); let client = BlockRangeScanner::new().connect(robust_provider.clone()).run()?; - robust_provider.root().anvil_mine(Some(20), None).await?; + robust_provider.primary().anvil_mine(Some(20), None).await?; let stream = client.stream_from(BlockNumberOrTag::Latest, 5).await?; let stream = assert_empty!(stream); - robust_provider.root().anvil_mine(Some(4), None).await?; + robust_provider.primary().anvil_mine(Some(4), None).await?; let mut stream = assert_empty!(stream); - robust_provider.root().anvil_mine(Some(1), None).await?; + robust_provider.primary().anvil_mine(Some(1), None).await?; assert_next!(stream, 20..=20); let mut stream = assert_empty!(stream); - robust_provider.root().anvil_mine(Some(1), None).await?; + robust_provider.primary().anvil_mine(Some(1), None).await?; assert_next!(stream, 21..=21); assert_empty!(stream); @@ -95,7 +95,7 @@ async fn continuous_blocks_if_reorg_less_than_block_confirmation() -> anyhow::Re let mut stream = client.stream_live(5).await?; // mine initial blocks - robust_provider.root().anvil_mine(Some(10), None).await?; + robust_provider.primary().anvil_mine(Some(10), None).await?; // assert initial block ranges immediately to avoid Anvil race condition: // @@ -110,9 +110,12 @@ async fn continuous_blocks_if_reorg_less_than_block_confirmation() -> anyhow::Re assert_next!(stream, 5..=5); // reorg less blocks than the block_confirmation config - robust_provider.root().anvil_reorg(ReorgOptions { depth: 4, tx_block_pairs: vec![] }).await?; + robust_provider + .primary() + .anvil_reorg(ReorgOptions { depth: 4, tx_block_pairs: vec![] }) + .await?; // mint additional blocks so the scanner processes reorged blocks - robust_provider.root().anvil_mine(Some(5), None).await?; + robust_provider.primary().anvil_mine(Some(5), None).await?; // no ReorgDetected should be emitted assert_next!(stream, 6..=6); @@ -136,7 +139,7 @@ async fn shallow_block_confirmation_does_not_mitigate_reorg() -> anyhow::Result< let mut stream = client.stream_live(3).await?; // mine initial blocks - robust_provider.root().anvil_mine(Some(10), None).await?; + robust_provider.primary().anvil_mine(Some(10), None).await?; // assert initial block ranges immediately to avoid Anvil race condition: // @@ -153,9 +156,12 @@ async fn shallow_block_confirmation_does_not_mitigate_reorg() -> anyhow::Result< assert_next!(stream, 7..=7); // reorg more blocks than the block_confirmation config - robust_provider.root().anvil_reorg(ReorgOptions { depth: 8, tx_block_pairs: vec![] }).await?; + robust_provider + .primary() + .anvil_reorg(ReorgOptions { depth: 8, tx_block_pairs: vec![] }) + .await?; // mint additional blocks - robust_provider.root().anvil_mine(Some(3), None).await?; + robust_provider.primary().anvil_mine(Some(3), None).await?; assert_next!(stream, ScannerStatus::ReorgDetected); assert_next!(stream, 0..=0); diff --git a/tests/common/setup_scanner.rs b/tests/common/setup_scanner.rs index 8c70601d..7b8672bd 100644 --- a/tests/common/setup_scanner.rs +++ b/tests/common/setup_scanner.rs @@ -46,7 +46,7 @@ pub async fn setup_common( )> { let anvil = spawn_anvil(block_interval)?; let provider = build_provider(&anvil).await?; - let contract = deploy_counter(provider.root().clone()).await?; + let contract = deploy_counter(provider.primary().clone()).await?; let default_filter = 
EventFilter::new().contract_address(*contract.address()).event(CountIncreased::SIGNATURE); diff --git a/tests/latest_events/basic.rs b/tests/latest_events/basic.rs index 95b4df96..eb43b8e9 100644 --- a/tests/latest_events/basic.rs +++ b/tests/latest_events/basic.rs @@ -92,7 +92,7 @@ async fn latest_scanner_respects_range_subset() -> anyhow::Result<()> { contract.increase().send().await?.watch().await?; // manual empty block minting - provider.root().anvil_mine(Some(2), None).await?; + provider.primary().anvil_mine(Some(2), None).await?; let head = provider.get_block_number().await?; // Choose a subrange covering last 4 blocks @@ -272,7 +272,7 @@ async fn latest_scanner_ignores_non_tracked_contract() -> anyhow::Result<()> { let scanner = setup.scanner; let contract_a = setup.contract; - let contract_b = deploy_counter(provider.root()).await?; + let contract_b = deploy_counter(provider.primary()).await?; // Listener only for contract A CountIncreased let mut stream_a = setup.stream; @@ -309,7 +309,7 @@ async fn latest_scanner_large_gaps_and_empty_ranges() -> anyhow::Result<()> { contract.increase().send().await?.watch().await?; // Mine 10 empty blocks - provider.root().anvil_mine(Some(10), None).await?; + provider.primary().anvil_mine(Some(10), None).await?; // Emit 1 more event contract.increase().send().await?.watch().await?; diff --git a/tests/live/basic.rs b/tests/live/basic.rs index 829012aa..cab23c0d 100644 --- a/tests/live/basic.rs +++ b/tests/live/basic.rs @@ -65,7 +65,7 @@ async fn multiple_contracts_same_event_isolate_callbacks() -> anyhow::Result<()> let setup = setup_live_scanner(Some(0.1), None, 0).await?; let provider = setup.provider.clone(); let a = setup.contract.clone(); - let b = deploy_counter(provider.root().clone()).await?; + let b = deploy_counter(provider.primary().clone()).await?; let a_filter = EventFilter::new() .contract_address(*a.address()) diff --git a/tests/live/reorg.rs b/tests/live/reorg.rs index 61162b9b..30af9e3a 100644 --- a/tests/live/reorg.rs +++ b/tests/live/reorg.rs @@ -35,7 +35,7 @@ async fn reorg_rescans_events_within_same_block() -> anyhow::Result<()> { (TransactionData::JSON(contract.increase().into_transaction_request()), 0), ]; - provider.root().anvil_reorg(ReorgOptions { depth: 4, tx_block_pairs }).await?; + provider.primary().anvil_reorg(ReorgOptions { depth: 4, tx_block_pairs }).await?; // assert expected messages post-reorg assert_next!(stream, ScannerStatus::ReorgDetected); @@ -79,7 +79,7 @@ async fn reorg_rescans_events_with_ascending_blocks() -> anyhow::Result<()> { (TransactionData::JSON(contract.increase().into_transaction_request()), 2), ]; - provider.root().anvil_reorg(ReorgOptions { depth: 4, tx_block_pairs }).await?; + provider.primary().anvil_reorg(ReorgOptions { depth: 4, tx_block_pairs }).await?; // assert expected messages post-reorg assert_next!(stream, ScannerStatus::ReorgDetected); @@ -114,7 +114,7 @@ async fn reorg_depth_one() -> anyhow::Result<()> { let tx_block_pairs = vec![(TransactionData::JSON(contract.increase().into_transaction_request()), 0)]; - provider.root().anvil_reorg(ReorgOptions { depth: 1, tx_block_pairs }).await?; + provider.primary().anvil_reorg(ReorgOptions { depth: 1, tx_block_pairs }).await?; // assert expected messages post-reorg assert_next!(stream, ScannerStatus::ReorgDetected); @@ -147,7 +147,7 @@ async fn reorg_depth_two() -> anyhow::Result<()> { let tx_block_pairs = vec![(TransactionData::JSON(contract.increase().into_transaction_request()), 0)]; - provider.root().anvil_reorg(ReorgOptions { depth: 
2, tx_block_pairs }).await?;
+    provider.primary().anvil_reorg(ReorgOptions { depth: 2, tx_block_pairs }).await?;
 
     // assert expected messages post-reorg
     assert_next!(stream, ScannerStatus::ReorgDetected);
@@ -166,7 +166,7 @@ async fn block_confirmations_mitigate_reorgs() -> anyhow::Result<()> {
     scanner.start().await?;
 
     // mine some initial blocks
-    provider.root().anvil_mine(Some(10), None).await?;
+    provider.primary().anvil_mine(Some(10), None).await?;
 
     // emit initial events
     for _ in 0..4 {
@@ -184,13 +184,13 @@ async fn block_confirmations_mitigate_reorgs() -> anyhow::Result<()> {
         (TransactionData::JSON(contract.increase().into_transaction_request()), 0),
     ];
 
-    provider.root().anvil_reorg(ReorgOptions { depth: 2, tx_block_pairs }).await?;
+    provider.primary().anvil_reorg(ReorgOptions { depth: 2, tx_block_pairs }).await?;
 
     // assert that still no events have been streamed
     let mut stream = assert_empty!(stream);
 
     // mine some additional post-reorg blocks
-    provider.root().anvil_mine(Some(10), None).await?;
+    provider.primary().anvil_mine(Some(10), None).await?;
 
     // no `ReorgDetected` should be emitted
     assert_next!(stream, &[CountIncreased { newCount: U256::from(1) }]);
diff --git a/tests/sync/from_block.rs b/tests/sync/from_block.rs
index b5836f33..1b146791 100644
--- a/tests/sync/from_block.rs
+++ b/tests/sync/from_block.rs
@@ -115,13 +115,13 @@ async fn block_confirmations_mitigate_reorgs() -> anyhow::Result<()> {
         (TransactionData::JSON(contract.increase().into_transaction_request()), 0),
         (TransactionData::JSON(contract.increase().into_transaction_request()), 0),
     ];
-    provider.root().anvil_reorg(ReorgOptions { depth: 2, tx_block_pairs }).await?;
+    provider.primary().anvil_reorg(ReorgOptions { depth: 2, tx_block_pairs }).await?;
 
     // assert that still no events have been streamed
     let mut stream = assert_empty!(stream);
 
     // mine some additional post-reorg blocks to confirm previous blocks with logs
-    provider.root().anvil_mine(Some(10), None).await?;
+    provider.primary().anvil_mine(Some(10), None).await?;
 
     // no `ReorgDetected` should be emitted
     assert_next!(stream, &[TestCounter::CountIncreased { newCount: U256::from(5) }]);
diff --git a/tests/sync/from_latest.rs b/tests/sync/from_latest.rs
index 19836502..1cafed26 100644
--- a/tests/sync/from_latest.rs
+++ b/tests/sync/from_latest.rs
@@ -142,12 +142,12 @@ async fn scan_latest_then_live_boundary_no_duplication() -> anyhow::Result<()> {
 
     // Historical: emit 3, mine 1 empty block to form a clear boundary
     contract.increase().send().await?.watch().await?;
-    provider.root().anvil_mine(Some(1), None).await?;
+    provider.primary().anvil_mine(Some(1), None).await?;
 
     contract.increase().send().await?.watch().await?;
     contract.increase().send().await?.watch().await?;
 
-    provider.root().anvil_mine(Some(1), None).await?;
+    provider.primary().anvil_mine(Some(1), None).await?;
 
     scanner.start().await?;
 
From d50ec858023ada0aba5934538751f18a44eb07b1 Mon Sep 17 00:00:00 2001
From: Leo
Date: Fri, 7 Nov 2025 18:56:18 +0900
Subject: [PATCH 111/122] ref: don't panic on pubsub not supported

---
 src/robust_provider.rs | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/src/robust_provider.rs b/src/robust_provider.rs
index fc7791ea..338faf38 100644
--- a/src/robust_provider.rs
+++ b/src/robust_provider.rs
@@ -207,7 +207,10 @@ impl RobustProvider {
     pub async fn subscribe_blocks(&self) -> Result, Error> {
         info!("eth_subscribe called");
         // immediately fail if primary does not support pubsub
-        self.primary().client().expect_pubsub_frontend();
+        if
!Self::supports_pubsub(self.primary()) { + return Err(RpcError::Transport(TransportErrorKind::PubsubUnavailable).into()); + } + let result = self .retry_with_total_timeout( move |provider| async move { provider.subscribe_blocks().await }, From 532209ee8dbe922cc56d6a7a950bf669f3276c4e Mon Sep 17 00:00:00 2001 From: Leo Date: Fri, 7 Nov 2025 19:03:18 +0900 Subject: [PATCH 112/122] ref: remove skipped count + return primary provider error --- src/robust_provider.rs | 51 +++++++++++++++++++++++++++++++----------- 1 file changed, 38 insertions(+), 13 deletions(-) diff --git a/src/robust_provider.rs b/src/robust_provider.rs index 338faf38..d1929c8e 100644 --- a/src/robust_provider.rs +++ b/src/robust_provider.rs @@ -251,8 +251,6 @@ impl RobustProvider { F: Fn(RootProvider) -> Fut, Fut: Future>>, { - let mut skipped_count = 0; - let mut providers = self.providers.iter(); let primary = providers.next().expect("should have primary provider"); @@ -273,7 +271,6 @@ impl RobustProvider { let fallback_num = idx + 1; if require_pubsub && !Self::supports_pubsub(provider) { info!("Fallback provider {} doesn't support pubsub, skipping", fallback_num); - skipped_count += 1; continue; } info!("Attempting fallback provider {}/{}", fallback_num, self.providers.len() - 1); @@ -290,12 +287,6 @@ impl RobustProvider { } } - // If all providers were skipped due to pubsub requirement - if skipped_count == self.providers.len() { - error!("All providers skipped - none support pubsub"); - return Err(RpcError::Transport(TransportErrorKind::PubsubUnavailable).into()); - } - // Return the last error encountered error!("All providers failed or timed out"); Err(last_error) @@ -438,14 +429,14 @@ mod tests { #[tokio::test] async fn test_subscribe_fails_causes_backup_to_be_used() { - let anvil_1 = Anvil::new().port(2222_u16).try_spawn().expect("Failed to start anvil"); + let anvil_1 = Anvil::new().try_spawn().expect("Failed to start anvil"); let ws_provider_1 = ProviderBuilder::new() .connect_ws(WsConnect::new(anvil_1.ws_endpoint_url().as_str())) .await .expect("Failed to connect to WS"); - let anvil_2 = Anvil::new().port(1111_u16).try_spawn().expect("Failed to start anvil"); + let anvil_2 = Anvil::new().port(8222_u16).try_spawn().expect("Failed to start anvil"); let ws_provider_2 = ProviderBuilder::new() .connect_ws(WsConnect::new(anvil_2.ws_endpoint_url().as_str())) @@ -466,7 +457,31 @@ mod tests { } #[tokio::test] - #[should_panic(expected = "called pubsub_frontend on a non-pubsub transport")] + async fn test_subscribe_fails_when_all_providers_lack_pubsub() { + let anvil = Anvil::new().try_spawn().expect("Failed to start anvil"); + + let http_provider = ProviderBuilder::new().connect_http(anvil.endpoint_url()); + + let robust = RobustProvider::new(http_provider.clone()) + .fallback(http_provider) + .max_timeout(Duration::from_secs(5)) + .max_retries(10) + .min_delay(Duration::from_millis(100)); + + let result = robust.subscribe_blocks().await.unwrap_err(); + + match result { + Error::RpcError(e) => { + assert!(matches!( + e.as_ref(), + RpcError::Transport(TransportErrorKind::PubsubUnavailable) + )); + } + _ => panic!("Should be a pubsub error"), + } + } + + #[tokio::test] async fn test_subscribe_fails_if_primary_provider_lacks_pubsub() { let anvil = Anvil::new().try_spawn().expect("Failed to start anvil"); @@ -482,7 +497,17 @@ mod tests { .max_retries(10) .min_delay(Duration::from_millis(100)); - let _ = robust.subscribe_blocks().await; + let result = robust.subscribe_blocks().await.unwrap_err(); + + match result { + 
Error::RpcError(e) => { + assert!(matches!( + e.as_ref(), + RpcError::Transport(TransportErrorKind::PubsubUnavailable) + )); + } + _ => panic!("Should be a pubsub error"), + } } #[tokio::test] From e0dbfa9f63daf2560f5dedc90d21744349b63fcb Mon Sep 17 00:00:00 2001 From: Leo Date: Fri, 7 Nov 2025 19:04:15 +0900 Subject: [PATCH 113/122] ref: take len out of loop --- src/robust_provider.rs | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/robust_provider.rs b/src/robust_provider.rs index d1929c8e..39fc3725 100644 --- a/src/robust_provider.rs +++ b/src/robust_provider.rs @@ -262,7 +262,8 @@ impl RobustProvider { let mut last_error = result.unwrap_err(); - if self.providers.len() > 1 { + let num_providers = self.providers.len(); + if num_providers > 1 { info!("Primary provider failed, trying fallback provider(s)"); } @@ -273,7 +274,7 @@ impl RobustProvider { info!("Fallback provider {} doesn't support pubsub, skipping", fallback_num); continue; } - info!("Attempting fallback provider {}/{}", fallback_num, self.providers.len() - 1); + info!("Attempting fallback provider {}/{}", fallback_num, num_providers - 1); match self.try_provider_with_timeout(provider, &operation).await { Ok(value) => { From 394fccfca7f30ff97ed002dbedc6d9d9f5c00064 Mon Sep 17 00:00:00 2001 From: Leo Date: Fri, 7 Nov 2025 19:11:55 +0900 Subject: [PATCH 114/122] ref: better logging --- src/robust_provider.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/robust_provider.rs b/src/robust_provider.rs index 39fc3725..a24b1ef4 100644 --- a/src/robust_provider.rs +++ b/src/robust_provider.rs @@ -289,7 +289,7 @@ impl RobustProvider { } // Return the last error encountered - error!("All providers failed or timed out"); + error!("All providers failed or timed out - returning the last providers attempt's error"); Err(last_error) } From ba2275f3cfd83eb7ffdb452db7a1c51c723086c7 Mon Sep 17 00:00:00 2001 From: Leo Date: Fri, 7 Nov 2025 19:13:11 +0900 Subject: [PATCH 115/122] ref: remove async test ref: remove more --- src/event_scanner/scanner/mod.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/event_scanner/scanner/mod.rs b/src/event_scanner/scanner/mod.rs index 87c5c116..e61e379f 100644 --- a/src/event_scanner/scanner/mod.rs +++ b/src/event_scanner/scanner/mod.rs @@ -447,8 +447,8 @@ mod tests { assert_eq!(builder.config.block_confirmations, DEFAULT_BLOCK_CONFIRMATIONS); } - #[tokio::test] - async fn test_historic_event_stream_listeners_vector_updates() { + #[test] + fn test_historic_event_stream_listeners_vector_updates() { let provider = RootProvider::::new(RpcClient::mocked(Asserter::new())); let robust_provider = RobustProvider::new(provider.clone()); let mut scanner = EventScannerBuilder::historic().connect(robust_provider); @@ -463,8 +463,8 @@ mod tests { assert_eq!(scanner.listeners.len(), 3); } - #[tokio::test] - async fn test_historic_event_stream_channel_capacity() { + #[test] + fn test_historic_event_stream_channel_capacity() { let provider = RootProvider::::new(RpcClient::mocked(Asserter::new())); let robust_provider = RobustProvider::new(provider.clone()); let mut scanner = EventScannerBuilder::historic().connect(robust_provider); From 4f171847f6564b32841ec9fc15b2378885caeae3 Mon Sep 17 00:00:00 2001 From: Leo Date: Fri, 7 Nov 2025 19:16:36 +0900 Subject: [PATCH 116/122] doc: better error returned --- src/robust_provider.rs | 26 ++++++++++++++++---------- 1 file changed, 16 insertions(+), 10 deletions(-) diff --git a/src/robust_provider.rs 
b/src/robust_provider.rs index a24b1ef4..54e8e6a1 100644 --- a/src/robust_provider.rs +++ b/src/robust_provider.rs @@ -114,8 +114,9 @@ impl RobustProvider { /// /// # Errors /// - /// Returns an error if RPC call fails repeatedly even - /// after exhausting retries or if the call times out. + /// Returns an error if the RPC call fails after exhausting all retry attempts + /// or if the call times out. When fallback providers are configured, the error + /// returned will be from the final provider that was attempted. pub async fn get_block_by_number( &self, number: BlockNumberOrTag, @@ -138,8 +139,9 @@ impl RobustProvider { /// /// # Errors /// - /// Returns an error if RPC call fails repeatedly even - /// after exhausting retries or if the call times out. + /// Returns an error if the RPC call fails after exhausting all retry attempts + /// or if the call times out. When fallback providers are configured, the error + /// returned will be from the final provider that was attempted. pub async fn get_block_number(&self) -> Result { info!("eth_getBlockNumber called"); let result = self @@ -158,8 +160,9 @@ impl RobustProvider { /// /// # Errors /// - /// Returns an error if RPC call fails repeatedly even - /// after exhausting retries or if the call times out. + /// Returns an error if the RPC call fails after exhausting all retry attempts + /// or if the call times out. When fallback providers are configured, the error + /// returned will be from the final provider that was attempted. pub async fn get_block_by_hash( &self, hash: alloy::primitives::BlockHash, @@ -182,8 +185,9 @@ impl RobustProvider { /// /// # Errors /// - /// Returns an error if RPC call fails repeatedly even - /// after exhausting retries or if the call times out. + /// Returns an error if the RPC call fails after exhausting all retry attempts + /// or if the call times out. When fallback providers are configured, the error + /// returned will be from the final provider that was attempted. pub async fn get_logs(&self, filter: &Filter) -> Result, Error> { info!("eth_getLogs called"); let result = self @@ -202,8 +206,10 @@ impl RobustProvider { /// /// # Errors /// - /// Returns an error if RPC call fails repeatedly even - /// after exhausting retries or if the call times out. + /// Returns an error if the primary provider does not support pubsub, if the RPC + /// call fails after exhausting all retry attempts, or if the call times out. + /// When fallback providers are configured, the error returned will be from the + /// final provider that was attempted. 
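+    ///
+    /// # Example
+    ///
+    /// A minimal sketch of handling the pubsub-unavailable case; `robust` is
+    /// assumed to be a configured `RobustProvider`:
+    ///
+    /// ```rust,ignore
+    /// use alloy::transports::{RpcError, TransportErrorKind};
+    /// use event_scanner::robust_provider::Error;
+    ///
+    /// match robust.subscribe_blocks().await {
+    ///     Ok(mut sub) => { /* consume headers via sub.recv().await */ }
+    ///     Err(Error::RpcError(e))
+    ///         if matches!(e.as_ref(), RpcError::Transport(TransportErrorKind::PubsubUnavailable)) =>
+    ///     {
+    ///         // no configured provider supports subscriptions; fall back to polling
+    ///     }
+    ///     Err(other) => return Err(other.into()),
+    /// }
+    /// ```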
pub async fn subscribe_blocks(&self) -> Result, Error> { info!("eth_subscribe called"); // immediately fail if primary does not support pubsub From 58d6690071bc07fb441154b6f24b4e5a47915cc2 Mon Sep 17 00:00:00 2001 From: Leo Date: Fri, 7 Nov 2025 19:25:26 +0900 Subject: [PATCH 117/122] feat: provider better feature examples of robust provider in the examples --- examples/historical_scanning/main.rs | 6 +++++- examples/latest_events_scanning/main.rs | 6 +++++- examples/live_scanning/main.rs | 6 +++++- examples/sync_from_block_scanning/main.rs | 6 +++++- examples/sync_from_latest_scanning/main.rs | 6 +++++- 5 files changed, 25 insertions(+), 5 deletions(-) diff --git a/examples/historical_scanning/main.rs b/examples/historical_scanning/main.rs index c1ff1d48..9adcba49 100644 --- a/examples/historical_scanning/main.rs +++ b/examples/historical_scanning/main.rs @@ -52,7 +52,11 @@ async fn main() -> anyhow::Result<()> { let _ = counter_contract.increase().send().await?.get_receipt().await?; - let robust_provider = RobustProvider::new(provider.clone()); + let robust_provider = RobustProvider::new(provider) + .max_timeout(std::time::Duration::from_secs(30)) + .max_retries(5) + .min_delay(std::time::Duration::from_millis(500)); + let mut scanner = EventScannerBuilder::historic().connect(robust_provider); let mut stream = scanner.subscribe(increase_filter); diff --git a/examples/latest_events_scanning/main.rs b/examples/latest_events_scanning/main.rs index fae0f04f..dcbcf2ff 100644 --- a/examples/latest_events_scanning/main.rs +++ b/examples/latest_events_scanning/main.rs @@ -49,7 +49,11 @@ async fn main() -> anyhow::Result<()> { .contract_address(*contract_address) .event(Counter::CountIncreased::SIGNATURE); - let robust_provider = RobustProvider::new(provider.clone()); + let robust_provider = RobustProvider::new(provider) + .max_timeout(std::time::Duration::from_secs(30)) + .max_retries(5) + .min_delay(std::time::Duration::from_millis(500)); + let mut scanner = EventScannerBuilder::latest(5).connect(robust_provider); let mut stream = scanner.subscribe(increase_filter); diff --git a/examples/live_scanning/main.rs b/examples/live_scanning/main.rs index 42f57b1f..47d7bef0 100644 --- a/examples/live_scanning/main.rs +++ b/examples/live_scanning/main.rs @@ -50,7 +50,11 @@ async fn main() -> anyhow::Result<()> { .contract_address(*contract_address) .event(Counter::CountIncreased::SIGNATURE); - let robust_provider = RobustProvider::new(provider.clone()); + let robust_provider = RobustProvider::new(provider) + .max_timeout(std::time::Duration::from_secs(30)) + .max_retries(5) + .min_delay(std::time::Duration::from_millis(500)); + let mut scanner = EventScannerBuilder::live().connect(robust_provider); let mut stream = scanner.subscribe(increase_filter); diff --git a/examples/sync_from_block_scanning/main.rs b/examples/sync_from_block_scanning/main.rs index d9c270fb..d8a0107d 100644 --- a/examples/sync_from_block_scanning/main.rs +++ b/examples/sync_from_block_scanning/main.rs @@ -58,7 +58,11 @@ async fn main() -> anyhow::Result<()> { info!("Historical event {} created", i + 1); } - let robust_provider = RobustProvider::new(provider.clone()); + let robust_provider = RobustProvider::new(provider) + .max_timeout(Duration::from_secs(30)) + .max_retries(5) + .min_delay(Duration::from_millis(500)); + let mut scanner = EventScannerBuilder::sync().from_block(0).connect(robust_provider); let mut stream = scanner.subscribe(increase_filter); diff --git a/examples/sync_from_latest_scanning/main.rs 
b/examples/sync_from_latest_scanning/main.rs index 554da2b5..a5c716e5 100644 --- a/examples/sync_from_latest_scanning/main.rs +++ b/examples/sync_from_latest_scanning/main.rs @@ -50,7 +50,11 @@ async fn main() -> anyhow::Result<()> { .contract_address(*contract_address) .event(Counter::CountIncreased::SIGNATURE); - let robust_provider = RobustProvider::new(provider.clone()); + let robust_provider = RobustProvider::new(provider) + .max_timeout(std::time::Duration::from_secs(30)) + .max_retries(5) + .min_delay(std::time::Duration::from_millis(500)); + let mut client = EventScannerBuilder::sync().from_latest(5).connect(robust_provider); let mut stream = client.subscribe(increase_filter); From 87d602979bd798b5d1c93b34b84d0ec3550fc67b Mon Sep 17 00:00:00 2001 From: Leo Date: Fri, 7 Nov 2025 19:27:35 +0900 Subject: [PATCH 118/122] feat: change port --- src/robust_provider.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/robust_provider.rs b/src/robust_provider.rs index 54e8e6a1..5b468a71 100644 --- a/src/robust_provider.rs +++ b/src/robust_provider.rs @@ -526,7 +526,7 @@ mod tests { .await .expect("Failed to connect to WS"); - let anvil_2 = Anvil::new().port(8222_u16).try_spawn().expect("Failed to start anvil"); + let anvil_2 = Anvil::new().port(8225_u16).try_spawn().expect("Failed to start anvil"); let http_provider = ProviderBuilder::new().connect_http(anvil_2.endpoint_url()); let robust = RobustProvider::new(ws_provider.clone()) From fbec4a8c51c975634933ceb7435bda7b7b74041c Mon Sep 17 00:00:00 2001 From: Leo Date: Fri, 7 Nov 2025 20:50:24 +0900 Subject: [PATCH 119/122] ref: remove timeout --- tests/block_range_scanner.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/block_range_scanner.rs b/tests/block_range_scanner.rs index 78bb3113..7a666ab2 100644 --- a/tests/block_range_scanner.rs +++ b/tests/block_range_scanner.rs @@ -32,7 +32,7 @@ async fn live_mode_processes_all_blocks_respecting_block_confirmations() -> anyh robust_provider.primary().anvil_mine(Some(1), None).await?; - assert_next!(stream, 6..=6, timeout = 10); + assert_next!(stream, 6..=6); assert_empty!(stream); // --- 1 block confirmation --- @@ -50,7 +50,7 @@ async fn live_mode_processes_all_blocks_respecting_block_confirmations() -> anyh robust_provider.primary().anvil_mine(Some(1), None).await?; - assert_next!(stream, 11..=11, timeout = 10); + assert_next!(stream, 11..=11); assert_empty!(stream); Ok(()) From 16128db08d86815a9f361359f93e060df0e503ff Mon Sep 17 00:00:00 2001 From: Nenad Date: Fri, 7 Nov 2025 14:24:09 +0100 Subject: [PATCH 120/122] feat: Support conversions (#164) --- README.md | 17 +- examples/historical_scanning/main.rs | 12 +- examples/latest_events_scanning/main.rs | 12 +- examples/live_scanning/main.rs | 12 +- examples/sync_from_block_scanning/main.rs | 12 +- examples/sync_from_latest_scanning/main.rs | 12 +- src/block_range_scanner.rs | 21 +- src/event_scanner/scanner/mod.rs | 66 ++--- src/event_scanner/scanner/sync/mod.rs | 21 +- src/robust_provider.rs | 266 ++++++++++++++++----- tests/block_range_scanner.rs | 89 +++---- tests/common/mod.rs | 4 +- tests/common/setup_scanner.rs | 21 +- tests/latest_events/basic.rs | 6 +- 14 files changed, 369 insertions(+), 202 deletions(-) diff --git a/README.md b/README.md index f3d8a3f6..bf74df65 100644 --- a/README.md +++ b/README.md @@ -60,7 +60,7 @@ Create an event stream for the given event filters registered with the `EventSca ```rust use alloy::{network::Ethereum, providers::{Provider, ProviderBuilder}, 
sol_types::SolEvent}; -use event_scanner::{EventFilter, EventScannerBuilder, Message, robust_provider::RobustProvider}; +use event_scanner::{EventFilter, EventScannerBuilder, Message, robust_provider::RobustProviderBuilder}; use tokio_stream::StreamExt; use crate::MyContract; @@ -71,7 +71,7 @@ async fn run_scanner( ) -> Result<(), Box> { // Connect to provider let provider = ProviderBuilder::new().connect(ws_url).await?; - let robust_provider = RobustProvider::new(provider); + let robust_provider = RobustProviderBuilder::new(provider).build().await?; // Configure scanner with custom batch size (optional) let mut scanner = EventScannerBuilder::live() @@ -116,30 +116,33 @@ async fn run_scanner( `EventScannerBuilder` provides mode-specific constructors and functions to configure settings before connecting. Once configured, connect using: -- `connect(robust_provider)` - Connect using a `RobustProvider` wrapping your alloy provider +- `connect(provider)` - Connect using a `RobustProvider` wrapping your alloy provider or using an alloy provider directly This will connect the `EventScanner` and allow you to create event streams and start scanning in various [modes](#scanning-modes). ```rust use alloy::providers::{Provider, ProviderBuilder}; -use event_scanner::robust_provider::RobustProvider; +use event_scanner::robust_provider::RobustProviderBuilder; // Connect to provider (example with WebSocket) let provider = ProviderBuilder::new().connect("ws://localhost:8545").await?; -let robust_provider = RobustProvider::new(provider); // Live streaming mode let scanner = EventScannerBuilder::live() .max_block_range(500) // Optional: set max blocks per read (default: 1000) .block_confirmations(12) // Optional: set block confirmations (default: 12) - .connect(robust_provider.clone()); + .connect(provider.clone()); // Historical block range mode let scanner = EventScannerBuilder::historic() .from_block(1_000_000) .to_block(2_000_000) .max_block_range(500) - .connect(robust_provider.clone()); + .connect(provider.clone()); + +// we can also wrap the provider in a RobustProvider +// for more advanced configurations like retries and fallbacks +let robust_provider = RobustProviderBuilder::new(provider).build().await?; // Latest events mode let scanner = EventScannerBuilder::latest(100) diff --git a/examples/historical_scanning/main.rs b/examples/historical_scanning/main.rs index 9adcba49..1d2cc039 100644 --- a/examples/historical_scanning/main.rs +++ b/examples/historical_scanning/main.rs @@ -1,7 +1,9 @@ use alloy::{providers::ProviderBuilder, sol, sol_types::SolEvent}; use alloy_node_bindings::Anvil; -use event_scanner::{EventFilter, EventScannerBuilder, Message, robust_provider::RobustProvider}; +use event_scanner::{ + EventFilter, EventScannerBuilder, Message, robust_provider::RobustProviderBuilder, +}; use tokio_stream::StreamExt; use tracing::{error, info}; use tracing_subscriber::EnvFilter; @@ -52,12 +54,14 @@ async fn main() -> anyhow::Result<()> { let _ = counter_contract.increase().send().await?.get_receipt().await?; - let robust_provider = RobustProvider::new(provider) + let robust_provider = RobustProviderBuilder::new(provider) .max_timeout(std::time::Duration::from_secs(30)) .max_retries(5) - .min_delay(std::time::Duration::from_millis(500)); + .min_delay(std::time::Duration::from_millis(500)) + .build() + .await?; - let mut scanner = EventScannerBuilder::historic().connect(robust_provider); + let mut scanner = EventScannerBuilder::historic().connect(robust_provider).await?; let mut stream = 
scanner.subscribe(increase_filter); diff --git a/examples/latest_events_scanning/main.rs b/examples/latest_events_scanning/main.rs index dcbcf2ff..a51c404f 100644 --- a/examples/latest_events_scanning/main.rs +++ b/examples/latest_events_scanning/main.rs @@ -1,6 +1,8 @@ use alloy::{providers::ProviderBuilder, sol, sol_types::SolEvent}; use alloy_node_bindings::Anvil; -use event_scanner::{EventFilter, EventScannerBuilder, Message, robust_provider::RobustProvider}; +use event_scanner::{ + EventFilter, EventScannerBuilder, Message, robust_provider::RobustProviderBuilder, +}; use tokio_stream::StreamExt; use tracing::{error, info}; use tracing_subscriber::EnvFilter; @@ -49,12 +51,14 @@ async fn main() -> anyhow::Result<()> { .contract_address(*contract_address) .event(Counter::CountIncreased::SIGNATURE); - let robust_provider = RobustProvider::new(provider) + let robust_provider = RobustProviderBuilder::new(provider) .max_timeout(std::time::Duration::from_secs(30)) .max_retries(5) - .min_delay(std::time::Duration::from_millis(500)); + .min_delay(std::time::Duration::from_millis(500)) + .build() + .await?; - let mut scanner = EventScannerBuilder::latest(5).connect(robust_provider); + let mut scanner = EventScannerBuilder::latest(5).connect(robust_provider).await?; let mut stream = scanner.subscribe(increase_filter); diff --git a/examples/live_scanning/main.rs b/examples/live_scanning/main.rs index 47d7bef0..dd888e20 100644 --- a/examples/live_scanning/main.rs +++ b/examples/live_scanning/main.rs @@ -1,6 +1,8 @@ use alloy::{providers::ProviderBuilder, sol, sol_types::SolEvent}; use alloy_node_bindings::Anvil; -use event_scanner::{EventFilter, EventScannerBuilder, Message, robust_provider::RobustProvider}; +use event_scanner::{ + EventFilter, EventScannerBuilder, Message, robust_provider::RobustProviderBuilder, +}; use tokio_stream::StreamExt; use tracing::{error, info}; @@ -50,12 +52,14 @@ async fn main() -> anyhow::Result<()> { .contract_address(*contract_address) .event(Counter::CountIncreased::SIGNATURE); - let robust_provider = RobustProvider::new(provider) + let robust_provider = RobustProviderBuilder::new(provider) .max_timeout(std::time::Duration::from_secs(30)) .max_retries(5) - .min_delay(std::time::Duration::from_millis(500)); + .min_delay(std::time::Duration::from_millis(500)) + .build() + .await?; - let mut scanner = EventScannerBuilder::live().connect(robust_provider); + let mut scanner = EventScannerBuilder::live().connect(robust_provider).await?; let mut stream = scanner.subscribe(increase_filter); diff --git a/examples/sync_from_block_scanning/main.rs b/examples/sync_from_block_scanning/main.rs index d8a0107d..35c0c254 100644 --- a/examples/sync_from_block_scanning/main.rs +++ b/examples/sync_from_block_scanning/main.rs @@ -2,7 +2,9 @@ use std::time::Duration; use alloy::{providers::ProviderBuilder, sol, sol_types::SolEvent}; use alloy_node_bindings::Anvil; -use event_scanner::{EventFilter, EventScannerBuilder, Message, robust_provider::RobustProvider}; +use event_scanner::{ + EventFilter, EventScannerBuilder, Message, robust_provider::RobustProviderBuilder, +}; use tokio::time::sleep; use tokio_stream::StreamExt; use tracing::{error, info}; @@ -58,12 +60,14 @@ async fn main() -> anyhow::Result<()> { info!("Historical event {} created", i + 1); } - let robust_provider = RobustProvider::new(provider) + let robust_provider = RobustProviderBuilder::new(provider) .max_timeout(Duration::from_secs(30)) .max_retries(5) - .min_delay(Duration::from_millis(500)); + 
.min_delay(Duration::from_millis(500)) + .build() + .await?; - let mut scanner = EventScannerBuilder::sync().from_block(0).connect(robust_provider); + let mut scanner = EventScannerBuilder::sync().from_block(0).connect(robust_provider).await?; let mut stream = scanner.subscribe(increase_filter); diff --git a/examples/sync_from_latest_scanning/main.rs b/examples/sync_from_latest_scanning/main.rs index a5c716e5..c8ba8cfb 100644 --- a/examples/sync_from_latest_scanning/main.rs +++ b/examples/sync_from_latest_scanning/main.rs @@ -1,6 +1,8 @@ use alloy::{providers::ProviderBuilder, sol, sol_types::SolEvent}; use alloy_node_bindings::Anvil; -use event_scanner::{EventFilter, EventScannerBuilder, Message, robust_provider::RobustProvider}; +use event_scanner::{ + EventFilter, EventScannerBuilder, Message, robust_provider::RobustProviderBuilder, +}; use tokio_stream::StreamExt; use tracing::{error, info}; @@ -50,12 +52,14 @@ async fn main() -> anyhow::Result<()> { .contract_address(*contract_address) .event(Counter::CountIncreased::SIGNATURE); - let robust_provider = RobustProvider::new(provider) + let robust_provider = RobustProviderBuilder::new(provider) .max_timeout(std::time::Duration::from_secs(30)) .max_retries(5) - .min_delay(std::time::Duration::from_millis(500)); + .min_delay(std::time::Duration::from_millis(500)) + .build() + .await?; - let mut client = EventScannerBuilder::sync().from_latest(5).connect(robust_provider); + let mut client = EventScannerBuilder::sync().from_latest(5).connect(robust_provider).await?; let mut stream = client.subscribe(increase_filter); diff --git a/src/block_range_scanner.rs b/src/block_range_scanner.rs index f227199a..da742503 100644 --- a/src/block_range_scanner.rs +++ b/src/block_range_scanner.rs @@ -12,7 +12,7 @@ //! BlockRangeScanner, BlockRangeScannerClient, DEFAULT_BLOCK_CONFIRMATIONS, //! DEFAULT_MAX_BLOCK_RANGE, Message, //! }, -//! robust_provider::RobustProvider, +//! robust_provider::RobustProviderBuilder, //! }; //! use tokio::time::Duration; //! use tracing::{error, info}; @@ -24,8 +24,8 @@ //! //! // Configuration //! let provider = ProviderBuilder::new().connect("ws://localhost:8546").await?; -//! let robust_provider = RobustProvider::new(provider); -//! let block_range_scanner = BlockRangeScanner::new().connect(robust_provider); +//! let robust_provider = RobustProviderBuilder::new(provider).build().await?; +//! let block_range_scanner = BlockRangeScanner::new().connect(robust_provider).await?; //! //! // Create client to send subscribe command to block scanner //! let client: BlockRangeScannerClient = block_range_scanner.run()?; @@ -69,7 +69,7 @@ use tokio_stream::{StreamExt, wrappers::ReceiverStream}; use crate::{ ScannerMessage, error::ScannerError, - robust_provider::{Error as RobustProviderError, RobustProvider}, + robust_provider::{Error as RobustProviderError, IntoRobustProvider, RobustProvider}, types::{ScannerStatus, TryStream}, }; use alloy::{ @@ -148,9 +148,16 @@ impl BlockRangeScanner { } /// Connects to an existing provider - #[must_use] - pub fn connect(self, provider: RobustProvider) -> ConnectedBlockRangeScanner { - ConnectedBlockRangeScanner { provider, max_block_range: self.max_block_range } + /// + /// # Errors + /// + /// Returns an error if the provider connection fails. 
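+    ///
+    /// A sketch of the common case: any `IntoRobustProvider` value works here,
+    /// including a plain endpoint URL string (the URL below is illustrative).
+    ///
+    /// ```rust,ignore
+    /// let scanner = BlockRangeScanner::new()
+    ///     .connect("ws://localhost:8546")
+    ///     .await?;
+    /// ```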
+ pub async fn connect( + self, + provider: impl IntoRobustProvider, + ) -> Result, ScannerError> { + let provider = provider.into_robust_provider().await?; + Ok(ConnectedBlockRangeScanner { provider, max_block_range: self.max_block_range }) } } diff --git a/src/event_scanner/scanner/mod.rs b/src/event_scanner/scanner/mod.rs index e61e379f..1a13a5f8 100644 --- a/src/event_scanner/scanner/mod.rs +++ b/src/event_scanner/scanner/mod.rs @@ -6,13 +6,13 @@ use tokio::sync::mpsc; use tokio_stream::wrappers::ReceiverStream; use crate::{ - EventFilter, Message, + EventFilter, Message, ScannerError, block_range_scanner::{ BlockRangeScanner, ConnectedBlockRangeScanner, DEFAULT_BLOCK_CONFIRMATIONS, MAX_BUFFERED_MESSAGES, }, event_scanner::listener::EventListener, - robust_provider::RobustProvider, + robust_provider::IntoRobustProvider, }; mod common; @@ -78,15 +78,15 @@ impl EventScannerBuilder { /// /// ```no_run /// # use alloy::{network::Ethereum, providers::{Provider, ProviderBuilder}}; - /// # use event_scanner::{EventFilter, EventScannerBuilder, Message, robust_provider::RobustProvider}; + /// # use event_scanner::{EventFilter, EventScannerBuilder, Message, robust_provider::RobustProviderBuilder}; /// # use tokio_stream::StreamExt; /// # /// # async fn example() -> Result<(), Box> { /// # let contract_address = alloy::primitives::address!("0xd8dA6BF26964af9d7eed9e03e53415d37aa96045"); /// // Stream all events from genesis to latest block /// let provider = ProviderBuilder::new().connect("ws://localhost:8545").await?; - /// let robust_provider = RobustProvider::new(provider); - /// let mut scanner = EventScannerBuilder::historic().connect(robust_provider); + /// let robust_provider = RobustProviderBuilder::new(provider).build().await?; + /// let mut scanner = EventScannerBuilder::historic().connect(robust_provider).await?; /// /// let filter = EventFilter::new().contract_address(contract_address); /// let mut stream = scanner.subscribe(filter); @@ -104,16 +104,17 @@ impl EventScannerBuilder { /// /// ```no_run /// # use alloy::{network::Ethereum, providers::{Provider, ProviderBuilder}}; - /// # use event_scanner::{EventScannerBuilder, robust_provider::RobustProvider}; + /// # use event_scanner::{EventScannerBuilder, robust_provider::RobustProviderBuilder}; /// # /// # async fn example() -> Result<(), Box> { /// // Stream events between blocks [1_000_000, 2_000_000] /// let provider = ProviderBuilder::new().connect("ws://localhost:8545").await?; - /// let robust_provider = RobustProvider::new(provider); + /// let robust_provider = RobustProviderBuilder::new(provider).build().await?; /// let mut scanner = EventScannerBuilder::historic() /// .from_block(1_000_000) /// .to_block(2_000_000) - /// .connect(robust_provider); + /// .connect(robust_provider) + /// .await?; /// # Ok(()) /// # } /// ``` @@ -143,17 +144,18 @@ impl EventScannerBuilder { /// /// ```no_run /// # use alloy::{network::Ethereum, providers::{Provider, ProviderBuilder}}; - /// # use event_scanner::{EventFilter, EventScannerBuilder, Message, robust_provider::RobustProvider}; + /// # use event_scanner::{EventFilter, EventScannerBuilder, Message, robust_provider::RobustProviderBuilder}; /// # use tokio_stream::StreamExt; /// # /// # async fn example() -> Result<(), Box> { /// # let contract_address = alloy::primitives::address!("0xd8dA6BF26964af9d7eed9e03e53415d37aa96045"); /// // Stream new events as they arrive /// let provider = ProviderBuilder::new().connect("ws://localhost:8545").await?; - /// let robust_provider = 
RobustProvider::new(provider); + /// let robust_provider = RobustProviderBuilder::new(provider).build().await?; /// let mut scanner = EventScannerBuilder::live() /// .block_confirmations(20) - /// .connect(robust_provider); + /// .connect(robust_provider) + /// .await?; /// /// let filter = EventFilter::new().contract_address(contract_address); /// let mut stream = scanner.subscribe(filter); @@ -230,15 +232,15 @@ impl EventScannerBuilder { /// /// ```no_run /// # use alloy::{network::Ethereum, primitives::Address, providers::{Provider, ProviderBuilder}}; - /// # use event_scanner::{EventFilter, EventScannerBuilder, Message, robust_provider::RobustProvider}; + /// # use event_scanner::{EventFilter, EventScannerBuilder, Message, robust_provider::RobustProviderBuilder}; /// # use tokio_stream::StreamExt; /// # /// # async fn example() -> Result<(), Box> { /// # let contract_address = alloy::primitives::address!("0xd8dA6BF26964af9d7eed9e03e53415d37aa96045"); /// // Collect the latest 10 events across Earliest..=Latest /// let provider = ProviderBuilder::new().connect("ws://localhost:8545").await?; - /// let robust_provider = RobustProvider::new(provider); - /// let mut scanner = EventScannerBuilder::latest(10).connect(robust_provider); + /// let robust_provider = RobustProviderBuilder::new(provider).build().await?; + /// let mut scanner = EventScannerBuilder::latest(10).connect(robust_provider).await?; /// /// let filter = EventFilter::new().contract_address(contract_address); /// let mut stream = scanner.subscribe(filter); @@ -257,12 +259,12 @@ impl EventScannerBuilder { /// /// ```no_run /// # use alloy::{network::Ethereum, providers::{Provider, ProviderBuilder}}; - /// # use event_scanner::{EventScannerBuilder, robust_provider::RobustProvider}; + /// # use event_scanner::{EventScannerBuilder, robust_provider::RobustProviderBuilder}; /// # /// # async fn example() -> Result<(), Box> { /// // Collect the latest 5 events between blocks [1_000_000, 1_100_000] /// let provider = ProviderBuilder::new().connect("ws://localhost:8545").await?; - /// let robust_provider = RobustProvider::new(provider); + /// let robust_provider = RobustProviderBuilder::new(provider).build().await?; /// let mut scanner = EventScannerBuilder::latest(5) /// .from_block(1_000_000) /// .to_block(1_100_000) @@ -389,10 +391,16 @@ impl EventScannerBuilder { /// Connects to an existing provider. /// /// Final builder method: consumes the builder and returns the built [`EventScanner`]. - #[must_use] - pub fn connect(self, provider: RobustProvider) -> EventScanner { - let block_range_scanner = self.block_range_scanner.connect::(provider); - EventScanner { config: self.config, block_range_scanner, listeners: Vec::new() } + /// + /// # Errors + /// + /// Returns an error if the provider connection fails. 
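+    ///
+    /// Accepts a prebuilt `RobustProvider` or anything convertible into one,
+    /// such as an alloy provider or an endpoint URL. A sketch (the URL is
+    /// illustrative):
+    ///
+    /// ```rust,ignore
+    /// let mut scanner = EventScannerBuilder::live()
+    ///     .connect("ws://localhost:8545")
+    ///     .await?;
+    /// ```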
+ pub async fn connect( + self, + provider: impl IntoRobustProvider, + ) -> Result, ScannerError> { + let block_range_scanner = self.block_range_scanner.connect::(provider).await?; + Ok(EventScanner { config: self.config, block_range_scanner, listeners: Vec::new() }) } } @@ -447,11 +455,10 @@ mod tests { assert_eq!(builder.config.block_confirmations, DEFAULT_BLOCK_CONFIRMATIONS); } - #[test] - fn test_historic_event_stream_listeners_vector_updates() { + #[tokio::test] + async fn test_historic_event_stream_listeners_vector_updates() -> anyhow::Result<()> { let provider = RootProvider::::new(RpcClient::mocked(Asserter::new())); - let robust_provider = RobustProvider::new(provider.clone()); - let mut scanner = EventScannerBuilder::historic().connect(robust_provider); + let mut scanner = EventScannerBuilder::historic().connect(provider).await?; assert!(scanner.listeners.is_empty()); @@ -461,17 +468,20 @@ mod tests { let _stream2 = scanner.subscribe(EventFilter::new()); let _stream3 = scanner.subscribe(EventFilter::new()); assert_eq!(scanner.listeners.len(), 3); + + Ok(()) } - #[test] - fn test_historic_event_stream_channel_capacity() { + #[tokio::test] + async fn test_historic_event_stream_channel_capacity() -> anyhow::Result<()> { let provider = RootProvider::::new(RpcClient::mocked(Asserter::new())); - let robust_provider = RobustProvider::new(provider.clone()); - let mut scanner = EventScannerBuilder::historic().connect(robust_provider); + let mut scanner = EventScannerBuilder::historic().connect(provider).await?; let _ = scanner.subscribe(EventFilter::new()); let sender = &scanner.listeners[0].sender; assert_eq!(sender.capacity(), MAX_BUFFERED_MESSAGES); + + Ok(()) } } diff --git a/src/event_scanner/scanner/sync/mod.rs b/src/event_scanner/scanner/sync/mod.rs index ec871636..e8467e54 100644 --- a/src/event_scanner/scanner/sync/mod.rs +++ b/src/event_scanner/scanner/sync/mod.rs @@ -25,17 +25,18 @@ impl EventScannerBuilder { /// /// ```no_run /// # use alloy::{network::Ethereum, providers::{Provider, ProviderBuilder}}; - /// # use event_scanner::{EventFilter, EventScannerBuilder, Message, robust_provider::RobustProvider}; + /// # use event_scanner::{EventFilter, EventScannerBuilder, Message, robust_provider::RobustProviderBuilder}; /// # use tokio_stream::StreamExt; /// # /// # async fn example() -> Result<(), Box> { /// # let contract_address = alloy::primitives::address!("0xd8dA6BF26964af9d7eed9e03e53415d37aa96045"); /// // Fetch the latest 10 events, then stream new events continuously /// let provider = ProviderBuilder::new().connect("ws://localhost:8545").await?; - /// let robust_provider = RobustProvider::new(provider); + /// let robust_provider = RobustProviderBuilder::new(provider).build().await?; /// let mut scanner = EventScannerBuilder::sync() /// .from_latest(10) - /// .connect(robust_provider); + /// .connect(robust_provider) + /// .await?; /// /// let filter = EventFilter::new().contract_address(contract_address); /// let mut stream = scanner.subscribe(filter); @@ -121,17 +122,18 @@ impl EventScannerBuilder { /// /// ```no_run /// # use alloy::{network::Ethereum, providers::{Provider, ProviderBuilder}}; - /// # use event_scanner::{EventFilter, EventScannerBuilder, Message, robust_provider::RobustProvider}; + /// # use event_scanner::{EventFilter, EventScannerBuilder, Message, robust_provider::RobustProviderBuilder}; /// # use tokio_stream::StreamExt; /// # /// # async fn example() -> Result<(), Box> { /// # let contract_address = 
alloy::primitives::address!("0xd8dA6BF26964af9d7eed9e03e53415d37aa96045"); /// // Sync from block 1_000_000 to present, then stream new events /// let provider = ProviderBuilder::new().connect("ws://localhost:8545").await?; - /// let robust_provider = RobustProvider::new(provider); + /// let robust_provider = RobustProviderBuilder::new(provider).build().await?; /// let mut scanner = EventScannerBuilder::sync() /// .from_block(1_000_000) - /// .connect(robust_provider); + /// .connect(robust_provider) + /// .await?; /// /// let filter = EventFilter::new().contract_address(contract_address); /// let mut stream = scanner.subscribe(filter); @@ -160,15 +162,16 @@ impl EventScannerBuilder { /// /// ```no_run /// # use alloy::{network::Ethereum, eips::BlockNumberOrTag, providers::{Provider, ProviderBuilder}}; - /// # use event_scanner::{EventScannerBuilder, robust_provider::RobustProvider}; + /// # use event_scanner::{EventScannerBuilder, robust_provider::RobustProviderBuilder}; /// # /// # async fn example() -> Result<(), Box> { /// // Sync from genesis block /// let provider = ProviderBuilder::new().connect("ws://localhost:8545").await?; - /// let robust_provider = RobustProvider::new(provider); + /// let robust_provider = RobustProviderBuilder::new(provider).build().await?; /// let mut scanner = EventScannerBuilder::sync() /// .from_block(BlockNumberOrTag::Earliest) - /// .connect(robust_provider); + /// .connect(robust_provider) + /// .await?; /// # Ok(()) /// # } /// ``` diff --git a/src/robust_provider.rs b/src/robust_provider.rs index 5b468a71..bdc6d2d2 100644 --- a/src/robust_provider.rs +++ b/src/robust_provider.rs @@ -1,12 +1,16 @@ -use std::{fmt::Debug, future::Future, sync::Arc, time::Duration}; +use std::{fmt::Debug, future::Future, marker::PhantomData, sync::Arc, time::Duration}; use alloy::{ eips::{BlockId, BlockNumberOrTag}, - network::Network, - providers::{Provider, RootProvider}, + network::{Ethereum, Network}, + providers::{ + DynProvider, Provider, RootProvider, + fillers::{FillProvider, TxFiller}, + layers::{CacheProvider, CallBatchProvider}, + }, pubsub::Subscription, rpc::types::{Filter, Log}, - transports::{RpcError, TransportErrorKind}, + transports::{RpcError, TransportErrorKind, http::reqwest::Url}, }; use backon::{ExponentialBuilder, Retryable}; use thiserror::Error; @@ -35,17 +39,86 @@ impl From for Error { } } -/// Provider wrapper with built-in retry and timeout mechanisms. -/// -/// This wrapper around Alloy providers automatically handles retries, -/// timeouts, and error logging for RPC calls. -/// The first provider in the vector is treated as the primary provider. -#[derive(Clone)] -pub struct RobustProvider { - providers: Vec>, - max_timeout: Duration, - max_retries: usize, - min_delay: Duration, +pub trait IntoProvider { + fn into_provider( + self, + ) -> impl std::future::Future, Error>> + Send; +} + +impl IntoProvider for RobustProvider { + async fn into_provider(self) -> Result, Error> { + Ok(self.primary().to_owned()) + } +} + +impl IntoProvider for RootProvider { + async fn into_provider(self) -> Result, Error> { + Ok(self) + } +} + +impl IntoProvider for &str { + async fn into_provider(self) -> Result, Error> { + Ok(RootProvider::connect(self).await?) + } +} + +impl IntoProvider for Url { + async fn into_provider(self) -> Result, Error> { + Ok(RootProvider::connect(self.as_str()).await?) 
+ } +} + +impl IntoProvider for FillProvider +where + F: TxFiller, + P: Provider, + N: Network, +{ + async fn into_provider(self) -> Result, Error> { + Ok(self) + } +} + +impl IntoProvider for CacheProvider +where + P: Provider, + N: Network, +{ + async fn into_provider(self) -> Result, Error> { + Ok(self) + } +} + +impl IntoProvider for DynProvider +where + N: Network, +{ + async fn into_provider(self) -> Result, Error> { + Ok(self) + } +} + +impl IntoProvider for CallBatchProvider +where + P: Provider + 'static, + N: Network, +{ + async fn into_provider(self) -> Result, Error> { + Ok(self) + } +} + +pub trait IntoRobustProvider { + fn into_robust_provider( + self, + ) -> impl std::future::Future, Error>> + Send; +} + +impl + Send> IntoRobustProvider for P { + async fn into_robust_provider(self) -> Result, Error> { + RobustProviderBuilder::new(self).build().await + } } // RPC retry and timeout settings @@ -56,19 +129,47 @@ pub const DEFAULT_MAX_RETRIES: usize = 3; /// Default base delay between retries. pub const DEFAULT_MIN_DELAY: Duration = Duration::from_secs(1); -impl RobustProvider { +#[derive(Clone)] +pub struct RobustProviderBuilder> { + providers: Vec
<P>
, + max_timeout: Duration, + max_retries: usize, + min_delay: Duration, + _network: PhantomData, +} + +impl> RobustProviderBuilder { /// Create a new `RobustProvider` with default settings. + /// /// The provided provider is treated as the primary provider. #[must_use] - pub fn new(provider: impl Provider) -> Self { + pub fn new(provider: P) -> Self { Self { - providers: vec![provider.root().to_owned()], + providers: vec![provider], max_timeout: DEFAULT_MAX_TIMEOUT, max_retries: DEFAULT_MAX_RETRIES, min_delay: DEFAULT_MIN_DELAY, + _network: PhantomData, } } + /// Create a new `RobustProvider` with no retry attempts and only timeout set. + /// + /// The provided provider is treated as the primary provider. + #[must_use] + pub fn fragile(provider: P) -> Self { + Self::new(provider).max_retries(0).min_delay(Duration::ZERO) + } + + /// Add a fallback provider to the list. + /// + /// Fallback providers are used when the primary provider times out or fails. + #[must_use] + pub fn fallback(mut self, provider: P) -> Self { + self.providers.push(provider); + self + } + /// Set the maximum timeout for RPC operations. #[must_use] pub fn max_timeout(mut self, timeout: Duration) -> Self { @@ -90,6 +191,41 @@ impl RobustProvider { self } + /// Build the `RobustProvider`. + /// + /// Final builder method: consumes the builder and returns the built [`RobustProvider`]. + /// + /// # Errors + /// + /// Returns an error if any of the providers fail to connect. + pub async fn build(self) -> Result, Error> { + let mut providers = vec![]; + for p in self.providers { + providers.push(p.into_provider().await?.root().to_owned()); + } + Ok(RobustProvider { + providers, + max_timeout: self.max_timeout, + max_retries: self.max_retries, + min_delay: self.min_delay, + }) + } +} + +/// Provider wrapper with built-in retry and timeout mechanisms. +/// +/// This wrapper around Alloy providers automatically handles retries, +/// timeouts, and error logging for RPC calls. +/// The first provider in the vector is treated as the primary provider. +#[derive(Clone)] +pub struct RobustProvider { + providers: Vec>, + max_timeout: Duration, + max_retries: usize, + min_delay: Duration, +} + +impl RobustProvider { /// Get a reference to the primary provider (the first provider in the list) /// /// # Panics @@ -101,15 +237,6 @@ impl RobustProvider { self.providers.first().expect("providers vector should never be empty") } - /// Add a fallback provider to the list. - /// - /// Fallback providers are used when the primary provider times out or fails. - #[must_use] - pub fn fallback(mut self, provider: impl Provider) -> Self { - self.providers.push(provider.root().to_owned()); - self - } - /// Fetch a block by number with retry and timeout. 
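    ///
    /// A one-line usage sketch (`robust` is assumed to be a configured
    /// `RobustProvider`; retries and the timeout are applied transparently):
    ///
    /// ```rust,ignore
    /// let block = robust.get_block_by_number(12345.into()).await?;
    /// ```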
/// /// # Errors @@ -337,14 +464,14 @@ impl RobustProvider { mod tests { use super::*; use alloy::{ - network::Ethereum, - providers::{ProviderBuilder, WsConnect}, + consensus::BlockHeader, + providers::{ProviderBuilder, WsConnect, ext::AnvilApi}, }; use alloy_node_bindings::Anvil; use std::sync::atomic::{AtomicUsize, Ordering}; use tokio::time::sleep; - fn test_provider(timeout: u64, max_retries: usize, min_delay: u64) -> RobustProvider { + fn test_provider(timeout: u64, max_retries: usize, min_delay: u64) -> RobustProvider { RobustProvider { providers: vec![RootProvider::new_http("http://localhost:8545".parse().unwrap())], max_timeout: Duration::from_millis(timeout), @@ -435,45 +562,50 @@ mod tests { } #[tokio::test] - async fn test_subscribe_fails_causes_backup_to_be_used() { - let anvil_1 = Anvil::new().try_spawn().expect("Failed to start anvil"); + async fn test_subscribe_fails_causes_backup_to_be_used() -> anyhow::Result<()> { + let anvil_1 = Anvil::new().try_spawn()?; - let ws_provider_1 = ProviderBuilder::new() - .connect_ws(WsConnect::new(anvil_1.ws_endpoint_url().as_str())) - .await - .expect("Failed to connect to WS"); + let ws_provider_1 = + ProviderBuilder::new().connect(anvil_1.ws_endpoint_url().as_str()).await?; - let anvil_2 = Anvil::new().port(8222_u16).try_spawn().expect("Failed to start anvil"); + let anvil_2 = Anvil::new().try_spawn()?; let ws_provider_2 = ProviderBuilder::new() - .connect_ws(WsConnect::new(anvil_2.ws_endpoint_url().as_str())) + .connect(anvil_2.ws_endpoint_url().as_str()) .await .expect("Failed to connect to WS"); - let robust = RobustProvider::new(ws_provider_1) - .fallback(ws_provider_2) - .max_timeout(Duration::from_secs(5)) - .max_retries(10) - .min_delay(Duration::from_millis(100)); + let robust = RobustProviderBuilder::fragile(ws_provider_1.clone()) + .fallback(ws_provider_2.clone()) + .max_timeout(Duration::from_secs(1)) + .build() + .await?; drop(anvil_1); - let result = robust.subscribe_blocks().await; + let mut subscription = robust.subscribe_blocks().await?; - assert!(result.is_ok(), "Expected subscribe blocks to work"); + ws_provider_2.anvil_mine(Some(2), None).await?; + + assert_eq!(1, subscription.recv().await?.number()); + assert_eq!(2, subscription.recv().await?.number()); + assert!(subscription.is_empty()); + + Ok(()) } #[tokio::test] - async fn test_subscribe_fails_when_all_providers_lack_pubsub() { - let anvil = Anvil::new().try_spawn().expect("Failed to start anvil"); + async fn test_subscribe_fails_when_all_providers_lack_pubsub() -> anyhow::Result<()> { + let anvil = Anvil::new().try_spawn()?; let http_provider = ProviderBuilder::new().connect_http(anvil.endpoint_url()); - let robust = RobustProvider::new(http_provider.clone()) + let robust = RobustProviderBuilder::new(http_provider.clone()) .fallback(http_provider) .max_timeout(Duration::from_secs(5)) - .max_retries(10) - .min_delay(Duration::from_millis(100)); + .min_delay(Duration::from_millis(100)) + .build() + .await?; let result = robust.subscribe_blocks().await.unwrap_err(); @@ -484,13 +616,15 @@ mod tests { RpcError::Transport(TransportErrorKind::PubsubUnavailable) )); } - _ => panic!("Should be a pubsub error"), + other => panic!("Expected PubsubUnavailable error type, got: {other:?}"), } + + Ok(()) } #[tokio::test] - async fn test_subscribe_fails_if_primary_provider_lacks_pubsub() { - let anvil = Anvil::new().try_spawn().expect("Failed to start anvil"); + async fn test_subscribe_fails_if_primary_provider_lacks_pubsub() -> anyhow::Result<()> { + let anvil = 
Anvil::new().try_spawn()?; let http_provider = ProviderBuilder::new().connect_http(anvil.endpoint_url()); let ws_provider = ProviderBuilder::new() @@ -498,27 +632,29 @@ mod tests { .await .expect("Failed to connect to WS"); - let robust = RobustProvider::new(http_provider) + let robust = RobustProviderBuilder::fragile(http_provider) .fallback(ws_provider) .max_timeout(Duration::from_secs(5)) - .max_retries(10) - .min_delay(Duration::from_millis(100)); + .build() + .await?; - let result = robust.subscribe_blocks().await.unwrap_err(); + let err = robust.subscribe_blocks().await.unwrap_err(); - match result { + match err { Error::RpcError(e) => { assert!(matches!( e.as_ref(), RpcError::Transport(TransportErrorKind::PubsubUnavailable) )); } - _ => panic!("Should be a pubsub error"), + other => panic!("Expected PubsubUnavailable error type, got: {other:?}"), } + + Ok(()) } #[tokio::test] - async fn test_ws_fails_http_fallback_returns_primary_error() { + async fn test_ws_fails_http_fallback_returns_primary_error() -> anyhow::Result<()> { let anvil_1 = Anvil::new().try_spawn().expect("Failed to start anvil"); let ws_provider = ProviderBuilder::new() @@ -526,14 +662,14 @@ mod tests { .await .expect("Failed to connect to WS"); - let anvil_2 = Anvil::new().port(8225_u16).try_spawn().expect("Failed to start anvil"); + let anvil_2 = Anvil::new().try_spawn().expect("Failed to start anvil"); let http_provider = ProviderBuilder::new().connect_http(anvil_2.endpoint_url()); - let robust = RobustProvider::new(ws_provider.clone()) + let robust = RobustProviderBuilder::fragile(ws_provider.clone()) .fallback(http_provider) .max_timeout(Duration::from_millis(500)) - .max_retries(0) - .min_delay(Duration::from_millis(10)); + .build() + .await?; // force ws_provider to fail and return BackendGone drop(anvil_1); @@ -549,5 +685,7 @@ mod tests { } Error::BlockNotFound(id) => panic!("Unexpected error type: BlockNotFound({id})"), } + + Ok(()) } } diff --git a/tests/block_range_scanner.rs b/tests/block_range_scanner.rs index 7a666ab2..7dfe10c4 100644 --- a/tests/block_range_scanner.rs +++ b/tests/block_range_scanner.rs @@ -6,22 +6,21 @@ use alloy::{ use alloy_node_bindings::Anvil; use event_scanner::{ ScannerError, ScannerStatus, assert_closed, assert_empty, assert_next, - block_range_scanner::BlockRangeScanner, robust_provider::RobustProvider, + block_range_scanner::BlockRangeScanner, }; #[tokio::test] async fn live_mode_processes_all_blocks_respecting_block_confirmations() -> anyhow::Result<()> { let anvil = Anvil::new().try_spawn()?; let provider = ProviderBuilder::new().connect(anvil.ws_endpoint_url().as_str()).await?; - let robust_provider = RobustProvider::new(provider); // --- Zero block confirmations -> stream immediately --- - let client = BlockRangeScanner::new().connect(robust_provider.clone()).run()?; + let client = BlockRangeScanner::new().connect(provider.clone()).await?.run()?; let mut stream = client.stream_live(0).await?; - robust_provider.primary().anvil_mine(Some(5), None).await?; + provider.anvil_mine(Some(5), None).await?; assert_next!(stream, 1..=1); assert_next!(stream, 2..=2); @@ -30,7 +29,7 @@ async fn live_mode_processes_all_blocks_respecting_block_confirmations() -> anyh assert_next!(stream, 5..=5); let mut stream = assert_empty!(stream); - robust_provider.primary().anvil_mine(Some(1), None).await?; + provider.anvil_mine(Some(1), None).await?; assert_next!(stream, 6..=6); assert_empty!(stream); @@ -39,7 +38,7 @@ async fn live_mode_processes_all_blocks_respecting_block_confirmations() -> anyh 
let mut stream = client.stream_live(1).await?; - robust_provider.primary().anvil_mine(Some(5), None).await?; + provider.anvil_mine(Some(5), None).await?; assert_next!(stream, 6..=6); assert_next!(stream, 7..=7); @@ -48,7 +47,7 @@ async fn live_mode_processes_all_blocks_respecting_block_confirmations() -> anyh assert_next!(stream, 10..=10); let mut stream = assert_empty!(stream); - robust_provider.primary().anvil_mine(Some(1), None).await?; + provider.anvil_mine(Some(1), None).await?; assert_next!(stream, 11..=11); assert_empty!(stream); @@ -61,23 +60,22 @@ async fn stream_from_latest_starts_at_tip_not_confirmed() -> anyhow::Result<()> let anvil = Anvil::new().try_spawn()?; let provider = ProviderBuilder::new().connect(anvil.ws_endpoint_url().as_str()).await?; - let robust_provider = RobustProvider::new(provider); - let client = BlockRangeScanner::new().connect(robust_provider.clone()).run()?; + let client = BlockRangeScanner::new().connect(provider.clone()).await?.run()?; - robust_provider.primary().anvil_mine(Some(20), None).await?; + provider.anvil_mine(Some(20), None).await?; let stream = client.stream_from(BlockNumberOrTag::Latest, 5).await?; let stream = assert_empty!(stream); - robust_provider.primary().anvil_mine(Some(4), None).await?; + provider.anvil_mine(Some(4), None).await?; let mut stream = assert_empty!(stream); - robust_provider.primary().anvil_mine(Some(1), None).await?; + provider.anvil_mine(Some(1), None).await?; assert_next!(stream, 20..=20); let mut stream = assert_empty!(stream); - robust_provider.primary().anvil_mine(Some(1), None).await?; + provider.anvil_mine(Some(1), None).await?; assert_next!(stream, 21..=21); assert_empty!(stream); @@ -89,13 +87,12 @@ async fn continuous_blocks_if_reorg_less_than_block_confirmation() -> anyhow::Re let anvil = Anvil::new().try_spawn()?; let provider = ProviderBuilder::new().connect(anvil.ws_endpoint_url().as_str()).await?; - let robust_provider = RobustProvider::new(provider); - let client = BlockRangeScanner::new().connect(robust_provider.clone()).run()?; + let client = BlockRangeScanner::new().connect(provider.clone()).await?.run()?; let mut stream = client.stream_live(5).await?; // mine initial blocks - robust_provider.primary().anvil_mine(Some(10), None).await?; + provider.anvil_mine(Some(10), None).await?; // assert initial block ranges immediately to avoid Anvil race condition: // @@ -110,12 +107,9 @@ async fn continuous_blocks_if_reorg_less_than_block_confirmation() -> anyhow::Re assert_next!(stream, 5..=5); // reorg less blocks than the block_confirmation config - robust_provider - .primary() - .anvil_reorg(ReorgOptions { depth: 4, tx_block_pairs: vec![] }) - .await?; + provider.anvil_reorg(ReorgOptions { depth: 4, tx_block_pairs: vec![] }).await?; // mint additional blocks so the scanner processes reorged blocks - robust_provider.primary().anvil_mine(Some(5), None).await?; + provider.anvil_mine(Some(5), None).await?; // no ReorgDetected should be emitted assert_next!(stream, 6..=6); @@ -133,13 +127,12 @@ async fn shallow_block_confirmation_does_not_mitigate_reorg() -> anyhow::Result< let anvil = Anvil::new().try_spawn()?; let provider = ProviderBuilder::new().connect(anvil.ws_endpoint_url().as_str()).await?; - let robust_provider = RobustProvider::new(provider); - let client = BlockRangeScanner::new().connect(robust_provider.clone()).run()?; + let client = BlockRangeScanner::new().connect(provider.clone()).await?.run()?; let mut stream = client.stream_live(3).await?; // mine initial blocks - 
robust_provider.primary().anvil_mine(Some(10), None).await?; + provider.anvil_mine(Some(10), None).await?; // assert initial block ranges immediately to avoid Anvil race condition: // @@ -156,12 +149,9 @@ async fn shallow_block_confirmation_does_not_mitigate_reorg() -> anyhow::Result< assert_next!(stream, 7..=7); // reorg more blocks than the block_confirmation config - robust_provider - .primary() - .anvil_reorg(ReorgOptions { depth: 8, tx_block_pairs: vec![] }) - .await?; + provider.anvil_reorg(ReorgOptions { depth: 8, tx_block_pairs: vec![] }).await?; // mint additional blocks - robust_provider.primary().anvil_mine(Some(3), None).await?; + provider.anvil_mine(Some(3), None).await?; assert_next!(stream, ScannerStatus::ReorgDetected); assert_next!(stream, 0..=0); @@ -190,9 +180,8 @@ async fn historical_emits_correction_range_when_reorg_below_end() -> anyhow::Res let end_num = 110; - let robust_provider = RobustProvider::new(provider.clone()); - - let client = BlockRangeScanner::new().max_block_range(30).connect(robust_provider).run()?; + let client = + BlockRangeScanner::new().max_block_range(30).connect(provider.clone()).await?.run()?; let mut stream = client .stream_historical(BlockNumberOrTag::Number(0), BlockNumberOrTag::Number(end_num)) @@ -223,8 +212,8 @@ async fn historical_emits_correction_range_when_end_num_reorgs() -> anyhow::Resu let end_num = 120; - let robust_provider = RobustProvider::new(provider.clone()); - let client = BlockRangeScanner::new().max_block_range(30).connect(robust_provider).run()?; + let client = + BlockRangeScanner::new().max_block_range(30).connect(provider.clone()).await?.run()?; let mut stream = client .stream_historical(BlockNumberOrTag::Number(0), BlockNumberOrTag::Number(end_num)) @@ -254,9 +243,8 @@ async fn historic_mode_respects_blocks_read_per_epoch() -> anyhow::Result<()> { provider.anvil_mine(Some(100), None).await?; - let robust_provider = RobustProvider::new(provider); let client = - BlockRangeScanner::new().max_block_range(5).connect(robust_provider.clone()).run()?; + BlockRangeScanner::new().max_block_range(5).connect(provider.clone()).await?.run()?; // ranges where each batch is of max blocks per epoch size let mut stream = client.stream_historical(0, 19).await?; @@ -283,7 +271,7 @@ async fn historic_mode_respects_blocks_read_per_epoch() -> anyhow::Result<()> { assert_closed!(stream); // range where blocks per epoch is larger than the number of blocks on chain - let client = BlockRangeScanner::new().max_block_range(200).connect(robust_provider).run()?; + let client = BlockRangeScanner::new().max_block_range(200).connect(provider).await?.run()?; let mut stream = client.stream_historical(0, 20).await?; assert_next!(stream, 0..=20); @@ -303,8 +291,7 @@ async fn historic_mode_normalises_start_and_end_block() -> anyhow::Result<()> { provider.anvil_mine(Some(11), None).await?; - let robust_provider = RobustProvider::new(provider); - let client = BlockRangeScanner::new().max_block_range(5).connect(robust_provider).run()?; + let client = BlockRangeScanner::new().max_block_range(5).connect(provider).await?.run()?; let mut stream = client.stream_historical(10, 0).await?; assert_next!(stream, 0..=4); @@ -328,8 +315,7 @@ async fn rewind_single_batch_when_epoch_larger_than_range() -> anyhow::Result<() provider.anvil_mine(Some(150), None).await?; - let robust_provider = RobustProvider::new(provider); - let client = BlockRangeScanner::new().max_block_range(100).connect(robust_provider).run()?; + let client = 
BlockRangeScanner::new().max_block_range(100).connect(provider).await?.run()?; let mut stream = client.rewind(100, 150).await?; @@ -347,8 +333,7 @@ async fn rewind_exact_multiple_of_epoch_creates_full_batches_in_reverse() -> any provider.anvil_mine(Some(15), None).await?; - let robust_provider = RobustProvider::new(provider); - let client = BlockRangeScanner::new().max_block_range(5).connect(robust_provider).run()?; + let client = BlockRangeScanner::new().max_block_range(5).connect(provider).await?.run()?; let mut stream = client.rewind(0, 14).await?; @@ -368,8 +353,7 @@ async fn rewind_with_remainder_trims_first_batch_to_stream_start() -> anyhow::Re provider.anvil_mine(Some(15), None).await?; - let robust_provider = RobustProvider::new(provider); - let client = BlockRangeScanner::new().max_block_range(4).connect(robust_provider).run()?; + let client = BlockRangeScanner::new().max_block_range(4).connect(provider).await?.run()?; let mut stream = client.rewind(3, 12).await?; @@ -389,8 +373,7 @@ async fn rewind_single_block_range() -> anyhow::Result<()> { provider.anvil_mine(Some(15), None).await?; - let robust_provider = RobustProvider::new(provider); - let client = BlockRangeScanner::new().max_block_range(5).connect(robust_provider).run()?; + let client = BlockRangeScanner::new().max_block_range(5).connect(provider).await?.run()?; let mut stream = client.rewind(7, 7).await?; @@ -407,8 +390,7 @@ async fn rewind_epoch_of_one_sends_each_block_in_reverse_order() -> anyhow::Resu provider.anvil_mine(Some(15), None).await?; - let robust_provider = RobustProvider::new(provider); - let client = BlockRangeScanner::new().max_block_range(1).connect(robust_provider).run()?; + let client = BlockRangeScanner::new().max_block_range(1).connect(provider).await?.run()?; let mut stream = client.rewind(5, 8).await?; @@ -430,8 +412,7 @@ async fn command_rewind_defaults_latest_to_earliest_batches_correctly() -> anyho // Mine 20 blocks, so the total number of blocks is 21 (including 0th block) provider.anvil_mine(Some(20), None).await?; - let robust_provider = RobustProvider::new(provider); - let client = BlockRangeScanner::new().max_block_range(7).connect(robust_provider).run()?; + let client = BlockRangeScanner::new().max_block_range(7).connect(provider).await?.run()?; let mut stream = client.rewind(BlockNumberOrTag::Earliest, BlockNumberOrTag::Latest).await?; @@ -451,8 +432,7 @@ async fn command_rewind_handles_start_and_end_in_any_order() -> anyhow::Result<( // Ensure blocks at 3 and 15 exist provider.anvil_mine(Some(16), None).await?; - let robust_provider = RobustProvider::new(provider); - let client = BlockRangeScanner::new().max_block_range(5).connect(robust_provider).run()?; + let client = BlockRangeScanner::new().max_block_range(5).connect(provider).await?.run()?; let mut stream = client.rewind(15, 3).await?; @@ -477,8 +457,7 @@ async fn command_rewind_propagates_block_not_found_error() -> anyhow::Result<()> // Do not mine up to 999 so start won't exist let provider = ProviderBuilder::new().connect(anvil.endpoint().as_str()).await?; - let robust_provider = RobustProvider::new(provider); - let client = BlockRangeScanner::new().max_block_range(5).connect(robust_provider).run()?; + let client = BlockRangeScanner::new().max_block_range(5).connect(provider).await?.run()?; let stream = client.rewind(0, 999).await; diff --git a/tests/common/mod.rs b/tests/common/mod.rs index e687d150..d45a1046 100644 --- a/tests/common/mod.rs +++ b/tests/common/mod.rs @@ -13,7 +13,7 @@ pub(crate) use test_counter::{TestCounter, 
deploy_counter};
 
 use alloy::{network::Ethereum, providers::ProviderBuilder};
 use alloy_node_bindings::{Anvil, AnvilInstance};
-use event_scanner::robust_provider::RobustProvider;
+use event_scanner::robust_provider::{RobustProvider, RobustProviderBuilder};
 
 pub fn spawn_anvil(block_time: Option<u64>) -> anyhow::Result<AnvilInstance> {
     let mut anvil = Anvil::new();
@@ -27,6 +27,6 @@ pub async fn build_provider(anvil: &AnvilInstance) -> anyhow::Result anyhow::Result + Clone>> {
     let (anvil, provider, contract, filter) = setup_common(block_interval, filter).await?;
 
-    let mut scanner =
-        EventScannerBuilder::live().block_confirmations(confirmations).connect(provider.clone());
+    let mut scanner = EventScannerBuilder::live()
+        .block_confirmations(confirmations)
+        .connect(provider.clone())
+        .await?;
 
@@ -82,7 +84,8 @@ pub async fn setup_sync_scanner(
     let mut scanner = EventScannerBuilder::sync()
         .from_block(from)
         .block_confirmations(confirmations)
-        .connect(provider.clone());
+        .connect(provider.clone())
+        .await?;
 
     let stream = scanner.subscribe(filter);
 
@@ -100,7 +103,8 @@ pub async fn setup_sync_from_latest_scanner(
     let mut scanner = EventScannerBuilder::sync()
         .from_latest(latest)
         .block_confirmations(confirmations)
-        .connect(provider.clone());
+        .connect(provider.clone())
+        .await?;
 
     let stream = scanner.subscribe(filter);
 
@@ -114,8 +118,11 @@ pub async fn setup_historic_scanner(
     to: BlockNumberOrTag,
 ) -> anyhow::Result + Clone>> {
     let (anvil, provider, contract, filter) = setup_common(block_interval, filter).await?;
-    let mut scanner =
-        EventScannerBuilder::historic().from_block(from).to_block(to).connect(provider.clone());
+    let mut scanner = EventScannerBuilder::historic()
+        .from_block(from)
+        .to_block(to)
+        .connect(provider.clone())
+        .await?;
 
     let stream = scanner.subscribe(filter);
 
@@ -138,7 +145,7 @@ pub async fn setup_latest_scanner(
         builder = builder.to_block(t);
     }
 
-    let mut scanner = builder.connect(provider.clone());
+    let mut scanner = builder.connect(provider.clone()).await?;
 
     let stream = scanner.subscribe(filter);
 
diff --git a/tests/latest_events/basic.rs b/tests/latest_events/basic.rs
index eb43b8e9..5d07f9ee 100644
--- a/tests/latest_events/basic.rs
+++ b/tests/latest_events/basic.rs
@@ -100,7 +100,7 @@ async fn latest_scanner_respects_range_subset() -> anyhow::Result<()> {
     let end = BlockNumberOrTag::from(head);
 
     let mut scanner_with_range =
-        EventScannerBuilder::latest(10).from_block(start).to_block(end).connect(provider);
+        EventScannerBuilder::latest(10).from_block(start).to_block(end).connect(provider).await?;
     let mut stream_with_range = scanner_with_range.subscribe(default_filter);
 
     scanner_with_range.start().await?;
@@ -318,7 +318,7 @@ async fn latest_scanner_large_gaps_and_empty_ranges() -> anyhow::Result<()> {
     let end = BlockNumberOrTag::from(head);
 
     let mut scanner_with_range =
-        EventScannerBuilder::latest(5).from_block(start).to_block(end).connect(provider);
+        EventScannerBuilder::latest(5).from_block(start).to_block(end).connect(provider).await?;
     let mut stream_with_range = scanner_with_range.subscribe(default_filter);
 
     scanner_with_range.start().await?;
@@ -349,7 +349,7 @@ async fn latest_scanner_boundary_range_single_block() -> anyhow::Result<()> {
     let end = start;
 
     let mut scanner_with_range =
-        EventScannerBuilder::latest(5).from_block(start).to_block(end).connect(provider);
+        EventScannerBuilder::latest(5).from_block(start).to_block(end).connect(provider).await?;
     let mut stream_with_range = scanner_with_range.subscribe(default_filter);
 
     scanner_with_range.start().await?;

From 48d9863b09109b7e9e66c1b790a323d81053a6bc Mon Sep 17 00:00:00 2001
From: 0xNeshi
Date: Fri, 7 Nov 2025 14:27:30 +0100
Subject: [PATCH 121/122] feat: don't error out if primary provider doesn't
 support pubsub on subscription

---
 src/robust_provider.rs | 24 +++++++-----------------
 1 file changed, 7 insertions(+), 17 deletions(-)

diff --git a/src/robust_provider.rs b/src/robust_provider.rs
index bdc6d2d2..4427f146 100644
--- a/src/robust_provider.rs
+++ b/src/robust_provider.rs
@@ -339,11 +339,6 @@ impl<N: Network> RobustProvider<N> {
     /// final provider that was attempted.
     pub async fn subscribe_blocks(&self) -> Result<Subscription<N::HeaderResponse>, Error> {
         info!("eth_subscribe called");
-        // immediately fail if primary does not support pubsub
-        if !Self::supports_pubsub(self.primary()) {
-            return Err(RpcError::Transport(TransportErrorKind::PubsubUnavailable).into());
-        }
-
         let result = self
             .retry_with_total_timeout(
                 move |provider| async move { provider.subscribe_blocks().await },
@@ -570,10 +565,8 @@ mod tests {
 
         let anvil_2 = Anvil::new().try_spawn()?;
 
-        let ws_provider_2 = ProviderBuilder::new()
-            .connect(anvil_2.ws_endpoint_url().as_str())
-            .await
-            .expect("Failed to connect to WS");
+        let ws_provider_2 =
+            ProviderBuilder::new().connect(anvil_2.ws_endpoint_url().as_str()).await?;
 
         let robust = RobustProviderBuilder::fragile(ws_provider_1.clone())
             .fallback(ws_provider_2.clone())
@@ -629,8 +622,7 @@ mod tests {
         let http_provider = ProviderBuilder::new().connect_http(anvil.endpoint_url());
         let ws_provider = ProviderBuilder::new()
             .connect_ws(WsConnect::new(anvil.ws_endpoint_url().as_str()))
-            .await
-            .expect("Failed to connect to WS");
+            .await?;
 
         let robust = RobustProviderBuilder::fragile(http_provider)
             .fallback(ws_provider)
@@ -655,14 +647,12 @@ mod tests {
 
     #[tokio::test]
     async fn test_ws_fails_http_fallback_returns_primary_error() -> anyhow::Result<()> {
-        let anvil_1 = Anvil::new().try_spawn().expect("Failed to start anvil");
+        let anvil_1 = Anvil::new().try_spawn()?;
 
-        let ws_provider = ProviderBuilder::new()
-            .connect_ws(WsConnect::new(anvil_1.ws_endpoint_url().as_str()))
-            .await
-            .expect("Failed to connect to WS");
+        let ws_provider =
+            ProviderBuilder::new().connect(anvil_1.ws_endpoint_url().as_str()).await?;
 
-        let anvil_2 = Anvil::new().try_spawn().expect("Failed to start anvil");
+        let anvil_2 = Anvil::new().try_spawn()?;
         let http_provider = ProviderBuilder::new().connect_http(anvil_2.endpoint_url());
 
         let robust = RobustProviderBuilder::fragile(ws_provider.clone())

From f65ffaf816ca0d71f0cf9689e4a31435a34427e4 Mon Sep 17 00:00:00 2001
From: 0xNeshi
Date: Fri, 7 Nov 2025 14:32:36 +0100
Subject: [PATCH 122/122] test: expect subscribe to succeed via fallback when
 primary lacks pubsub

---
 src/robust_provider.rs | 16 ++++------------
 1 file changed, 4 insertions(+), 12 deletions(-)

diff --git a/src/robust_provider.rs b/src/robust_provider.rs
index 4427f146..bada3fb2 100644
--- a/src/robust_provider.rs
+++ b/src/robust_provider.rs
@@ -616,7 +616,8 @@ mod tests {
     }
 
     #[tokio::test]
-    async fn test_subscribe_fails_if_primary_provider_lacks_pubsub() -> anyhow::Result<()> {
+    async fn test_subscribe_succeeds_if_primary_provider_lacks_pubsub_but_fallback_supports_it()
+    -> anyhow::Result<()> {
         let anvil = Anvil::new().try_spawn()?;
 
         let http_provider = ProviderBuilder::new().connect_http(anvil.endpoint_url());
@@ -630,17 +631,8 @@ mod tests {
             .build()
             .await?;
 
-        let err = robust.subscribe_blocks().await.unwrap_err();
-
-        match err {
-            Error::RpcError(e) => {
-                assert!(matches!(
-                    e.as_ref(),
-                    RpcError::Transport(TransportErrorKind::PubsubUnavailable)
-                ));
-            }
-            other => panic!("Expected PubsubUnavailable error type, got: {other:?}"),
-        }
+        let result = robust.subscribe_blocks().await;
+        assert!(result.is_ok());
 
         Ok(())
     }
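
Taken together, these last two patches change `subscribe_blocks` so that a primary provider without pubsub support no longer short-circuits the call; the normal retry path runs instead, and a pubsub-capable fallback can serve the subscription. The sketch below illustrates that fallback shape only. It is not the crate's implementation: the flat `providers` slice and the omitted retry/timeout wrapping are simplifying assumptions, and it assumes alloy with the pubsub feature enabled.

```rust
use alloy::{
    network::Network,
    providers::{Provider, RootProvider},
    pubsub::Subscription,
    transports::{RpcError, TransportErrorKind},
};

/// Illustrative sketch only: `RobustProvider` keeps a primary plus fallbacks
/// and wraps each attempt in retry/timeout logic; this flattens that into a
/// plain slice to show why removing the early pubsub check enables fallback.
async fn subscribe_blocks_with_fallback<N: Network>(
    providers: &[RootProvider<N>],
) -> Result<Subscription<N::HeaderResponse>, RpcError<TransportErrorKind>> {
    // Before PATCH 121, a pubsub-incapable primary returned PubsubUnavailable
    // immediately. Trying each provider in order means an HTTP primary with a
    // WS fallback still ends up with a live block subscription.
    let mut last_err = RpcError::Transport(TransportErrorKind::PubsubUnavailable);
    for provider in providers {
        match provider.subscribe_blocks().await {
            Ok(subscription) => return Ok(subscription),
            Err(e) => last_err = e,
        }
    }
    Err(last_err)
}
```

This is also why the updated test above asserts `result.is_ok()` for an HTTP primary paired with a WS fallback: the error surfaces only after every provider has been tried.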